From e00d5172308dd42f0b5e45bfa0190c76d4fe44ab Mon Sep 17 00:00:00 2001
From: ido-shm-uel
Date: Tue, 1 Oct 2024 22:56:04 +0300
Subject: [PATCH 01/72] Add files via upload

Added assignment and store-into-other NLR tests for every activation
function.
---
 Test_NetworkLevelReasoner.h | 3638 +++++++++++++++++++++++++++++++++++
 1 file changed, 3638 insertions(+)
 create mode 100644 Test_NetworkLevelReasoner.h

diff --git a/Test_NetworkLevelReasoner.h b/Test_NetworkLevelReasoner.h
new file mode 100644
index 0000000000..5e98d7fb3a
--- /dev/null
+++ b/Test_NetworkLevelReasoner.h
@@ -0,0 +1,3638 @@
+/********************* */
+/*! \file Test_NetworkLevelReasoner.h
+ ** \verbatim
+ ** Top contributors (to current version):
+ **   Guy Katz, Andrew Wu
+ ** This file is part of the Marabou project.
+ ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS
+ ** in the top-level source directory) and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** [[ Add lengthier description here ]]
+
+**/
+
+#include "../../engine/tests/MockTableau.h" // TODO: fix this
+#include "FloatUtils.h"
+#include "Layer.h"
+#include "NetworkLevelReasoner.h"
+#include "Options.h"
+#include "Query.h"
+#include "Tightening.h"
+#include "Vector.h"
+
+#include <cxxtest/TestSuite.h>
+
+class MockForNetworkLevelReasoner
+{
+public:
+};
+
+class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
+{
+public:
+    MockForNetworkLevelReasoner *mock;
+
+    // Allocated fresh for every test case; CxxTest calls setUp/tearDown
+    // around each test_* method.
+    void setUp()
+    {
+        TS_ASSERT( mock = new MockForNetworkLevelReasoner );
+    }
+
+    void tearDown()
+    {
+        TS_ASSERT_THROWS_NOTHING( delete mock );
+    }
+
+    // Builds the canonical 2-input test network used throughout this suite:
+    // six layers alternating weighted sums and ReLU activations
+    // (2 -> 3 -> 3 -> 2 -> 2 -> 2 neurons).
+    void populateNetwork( NLR::NetworkLevelReasoner &nlr )
+    {
+        /*
+                a
+          x           d    f
+                b
+          y           e    g
+                c
+        */
+
+        // Create the layers
+        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 );
+        nlr.addLayer( 2, NLR::Layer::RELU, 3 );
+        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
+        nlr.addLayer( 4, NLR::Layer::RELU, 2 );
+        nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 );
+
+        // Mark layer dependencies
+        for ( unsigned i = 1; i <= 5; ++i )
+            nlr.addLayerDependency( i - 1, i );
+
+        // Set the weights and biases for the weighted sum layers
+        nlr.setWeight( 0, 0, 1, 0, 1 );
+        nlr.setWeight( 0, 0, 1, 1, 2 );
+        nlr.setWeight( 0, 1, 1, 1, -3 );
+        nlr.setWeight( 0, 1, 1, 2, 1 );
+
+        nlr.setWeight( 2, 0, 3, 0, 1 );
+        nlr.setWeight( 2, 0, 3, 1, -1 );
+        nlr.setWeight( 2, 1, 3, 0, 1 );
+        nlr.setWeight( 2, 1, 3, 1, 1 );
+        nlr.setWeight( 2, 2, 3, 0, -1 );
+        nlr.setWeight( 2, 2, 3, 1, -1 );
+
+        nlr.setWeight( 4, 0, 5, 0, 1 );
+        nlr.setWeight( 4, 0, 5, 1, 1 );
+        nlr.setWeight( 4, 1, 5, 1, 3 );
+
+        nlr.setBias( 1, 0, 1 );
+        nlr.setBias( 3, 1, 2 );
+
+        // Mark the ReLU sources
+        nlr.addActivationSource( 1, 0, 2, 0 );
+        nlr.addActivationSource( 1, 1, 2, 1 );
+        nlr.addActivationSource( 1, 2, 2, 2 );
+
+        nlr.addActivationSource( 3, 0, 4, 0 );
+        nlr.addActivationSource( 3, 1, 4, 1 );
+
+        // Variable indexing
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 );
+    }
+
+    // Same topology and weights as populateNetwork(), with both activation
+    // layers replaced by Sigmoids.
+    void populateNetworkWithSigmoids( NLR::NetworkLevelReasoner &nlr )
+    {
+        /*
+                a
+          x           d    f
+                b
+          y           e    g
+                c
+        */
+
+        // Create the layers
+        nlr.addLayer( 0, NLR::Layer::INPUT, 2 );
+        nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 );
+        nlr.addLayer( 2, NLR::Layer::SIGMOID, 3 );
+        nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 );
+        nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 );
+        nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 );
+
+        // Mark layer dependencies
+        for ( unsigned i = 1; i <= 5; ++i )
+            nlr.addLayerDependency( i - 1, i );
+
+        // Set the weights and biases for the weighted sum layers
+        nlr.setWeight( 0, 0, 1, 0, 1 );
+        nlr.setWeight( 0, 0, 1, 1, 2 );
+        nlr.setWeight( 0, 1, 1, 1, -3 );
+        nlr.setWeight( 0, 1, 1, 2, 1 );
+
+        nlr.setWeight( 2, 0, 3, 0, 1 );
+        nlr.setWeight( 2, 0, 3, 1, -1 );
+        nlr.setWeight( 2, 1, 3, 0, 1 );
+        nlr.setWeight( 2, 1, 3, 1, 1 );
+        nlr.setWeight( 2, 2, 3, 0, -1 );
+        nlr.setWeight( 2, 2, 3, 1, -1 );
+
+        nlr.setWeight( 4, 0, 5, 0, 1 );
+        nlr.setWeight( 4, 0, 5, 1, 1 );
+        nlr.setWeight( 4, 1, 5, 1, 3 );
+
+        nlr.setBias( 1, 0, 1 );
+        nlr.setBias( 3, 1, 2 );
+
+        // Mark the Sigmoid sources
+        nlr.addActivationSource( 1, 0, 2, 0 );
+        nlr.addActivationSource( 1, 1, 2, 1 );
+        nlr.addActivationSource( 1, 2, 2, 2 );
+
+        nlr.addActivationSource( 3, 0, 4, 0 );
+        nlr.addActivationSource( 3, 1, 4, 1 );
+
+        // Variable indexing
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 );
+
+        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 );
+        nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 );
+    }
+ + void populateNetworkWithAbs( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithSign( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( 
NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithRound( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Round sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 
2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithLeakyRelu( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + + + void populateNetworkWithMax( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x b e + g + y c f + d + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::MAX, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::MAX, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -2 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 2 ); + nlr.setWeight( 0, 1, 1, 3, -3 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 0, 2 ); + + // Mark the Max sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + 
nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + } + + void populateNetworkWithSoftmax( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SOFTMAX, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Softmax sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); 
+ nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 0, 4, 1 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithBilinear( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x b e + g + y c f + d + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::BILINEAR, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -2 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 2 
); + nlr.setWeight( 0, 1, 1, 3, -3 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 0, 2 ); + + // Mark the Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + } + + void populateNetworkWithAbsAndRelu( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + 
nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -5 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Round/Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithRoundAndSign( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for 
the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Round/Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithLeakyReluAndSigmoid( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 
5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the LeakyReLU/Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d + b f + y e + c + */ + + // Create the layers + nlr.addLayer( 0, 
NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::MAX, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Softmax/Max sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 
10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + } + + + void populateNetworkWithReluAndBilinear( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d + b f + y e + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the ReLU/Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + 
nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + } + + void test_evaluate_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetwork( nlr ); + + double input[2]; + double output[2]; + + // With ReLUs, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With ReLUs, case 1 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 1 ) ); + + // With ReLUs, case 2 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0 ) ); + } + + void test_evaluate_sigmoids() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSigmoids( nlr ); + + double input[2]; + double output[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.6750, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 3.0167, 0.0001 ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.6032, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.5790, 0.0001 ) ); + + // case 3 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.5045, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.1957, 0.0001 ) ); + } + + void test_evaluate_abs() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbs( nlr ); + + double input[2]; + double output[2]; + + // With Abs, Inputs are zeros, only biases count + 
input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Abs, case 1 + input[0] = -2; + input[1] = -2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Abs, case 2 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 10 ) ); + } + + void test_evaluate_sign() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSign( nlr ); + + double input[2]; + double output[2]; + + // With Sign, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Sign, case 1 + input[0] = -2; + input[1] = -2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Sign, case 2 (0 considered "non-negative", sign(0)=1) + input[0] = -1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4 ) ); + } + + void test_evaluate_round() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithRound( nlr ); + + double input[2]; + double output[2]; + + // With Round, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Round, case 1 
+ input[0] = 2.1; + input[1] = 1.4; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 2, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); + + // With Round, case 2 + input[0] = 2.1; + input[1] = 1.6; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -12, 0.0001 ) ); + } + + void test_evaluate_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithLeakyRelu( nlr ); + + double input[2]; + double output[2]; + + // With Leaky ReLU, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Leaky ReLU, case 1 (alpha=0.1) + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.9, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0.57, 0.0001 ) ); + + // With Leaky ReLU, case 2 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -0.04, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -0.76, 0.0001 ) ); + } + + void test_evaluate_max() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithMax( nlr ); + + double input[2]; + double output[1]; + + // With Max, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -3 ) ); + + // With Max, case 1 + input[0] = 1; + input[1] = -3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -18 ) ); + + // With Max, case 2 + input[0] = -3; + 
input[1] = 3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -5 ) ); + } + + + void test_evaluate_softmax() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmax( nlr ); + + double input[2]; + double output[2]; + + // With Softmax, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.2999, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.4001, 0.0001 ) ); + + // With Softmax, case 1 + input[0] = 1; + input[1] = -3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.7615, 0.0001 ) ); + + // With Softmax, case 2 + input[0] = -3; + input[1] = 3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.1206, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.7588, 0.0001 ) ); + } + + void test_evaluate_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithBilinear( nlr ); + + double input[2]; + double output[1]; + + // With Bilinear, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + + // With Bilinear, case 1 + input[0] = 0.1; + input[1] = -0.3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 2.8304, 0.0001 ) ); + + // With Bilinear, case 2 + input[0] = -0.3; + input[1] = 0.3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.0912, 0.0001 ) ); + } + + void test_evaluate_non_consecutive_layers() + { + NLR::NetworkLevelReasoner nlr; + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + 
nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + nlr.addLayerDependency( 0, 1 ); + nlr.addLayerDependency( 1, 2 ); + nlr.addLayerDependency( 2, 3 ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 3, 4 ); + nlr.addLayerDependency( 0, 4 ); + nlr.addLayerDependency( 4, 5 ); + + // Set the weights and relus + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, 2 ); + nlr.setWeight( 2, 2, 3, 1, -2 ); + nlr.setWeight( 0, 1, 3, 1, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 0, 0, 4, 2 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 2, 5, 0, 1 ); + + // Evaluate + double input[2]; + double output; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); + TS_ASSERT( FloatUtils::areEqual( output, 2 ) ); + + input[0] = -1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); + TS_ASSERT( FloatUtils::areEqual( output, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.computeSuccessorLayers() ); + + TS_ASSERT_EQUALS( nlr.getLayer( 0 )->getSuccessorLayers(), Set( { 1, 3, 4 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 1 )->getSuccessorLayers(), Set( { 2 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 2 )->getSuccessorLayers(), Set( { 3 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 3 )->getSuccessorLayers(), Set( { 4 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getSuccessorLayers(), Set( { 5 } ) ); + } + + void 
test_evaluate_abs_and_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbsAndRelu( nlr ); + + double input[2]; + double output[2]; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); + + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + } + + void test_evaluate_round_and_sign() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithRoundAndSign( nlr ); + + double input[2]; + double output[2]; + + input[0] = 1.6; + input[1] = 1.4; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -2, 0.0001 ) ); + + input[0] = 1.6; + input[1] = 1.6; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -1, 0.0001) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); + } + + void test_evaluate_leaky_relu_and_sigmoid() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithLeakyReluAndSigmoid( nlr ); + + double input[2]; + double output[2]; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.7109, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 1.4602, 0.0001 ) ); + + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.4013, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0.6508, 0.0001 ) ); + } + + void test_evaluate_softmax_and_max() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmaxAndMax( nlr ); + + double input[2]; + double output[1]; + + 
input[0] = 1; + input[1] = -3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -2.9998, 0.0001 ) ); + + input[0] = -3; + input[1] = 3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -1.0000, 0.0001 ) ); + } + + void test_evaluate_relu_and_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithReluAndBilinear( nlr ); + + double input[2]; + double output[1]; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + } + + void test_store_into_other() + { + NLR::NetworkLevelReasoner nlr; + + populateNetwork( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + // With ReLUs, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // With ReLUs, case 1 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( 
FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_sigmoids() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSigmoids( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_round() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithRound( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_sign() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSign( 
nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_abs() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbs( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithLeakyRelu( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + 
TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_max() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithMax( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_softmax() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmax( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( 
output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithBilinear( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_interval_arithmetic_bound_propagation_relu_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetwork( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + 
tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : expectedBounds ) + TS_ASSERT( bounds.exists( bound ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + 
tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, 
Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT_EQUALS( expectedBounds2.size(), bounds.size() ); + for ( const auto &bound : expectedBounds2 ) + TS_ASSERT( bounds.exists( bound ) ); + } + + void test_interval_arithmetic_bound_propagation_abs_constraints() + { + NLR::NetworkLevelReasoner nlr; + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Layer dependenices + nlr.addLayerDependency( 0, 1 ); + nlr.addLayerDependency( 1, 2 ); + nlr.addLayerDependency( 2, 3 ); + nlr.addLayerDependency( 3, 4 ); + nlr.addLayerDependency( 4, 5 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 8, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 41, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : expectedBounds ) + TS_ASSERT( bounds.exists( bound ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 
8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT_EQUALS( expectedBounds2.size(), bounds.size() ); + for ( const auto &bound : expectedBounds2 ) + TS_ASSERT( bounds.exists( bound ) ); + } + + void populateNetworkSBT( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + 2 R 1 + x0 --- x2 ---> x4 --- x6 + \ / / + 1 \ / / + \/ -1 / + /\ / + 3 / \ / + / \ R / + x1 --- x3 ---> x5 + 1 + */ + + // Create the layers + 
nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 7 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + } + + void test_sbt_relus_all_active() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBT( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() 
); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Both ReLUs active, bound survive through activations: + + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ + + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : expectedBounds ) + TS_ASSERT( bounds.exists( bound ) ); + } + + void test_sbt_relus_active_and_inactive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBT( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is 
inactive, bounds get zeroed + Second ReLU is active, bounds surive the activation + + x4.lb = 0 + x4.ub = 0 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); + } + + void test_sbt_relus_active_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBT( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is undecided, bound is concretized. 
+ Coefficient: 12/(12--4) = 12/16 = 0.75 + Second ReLU is active, bounds surive the activation + + x4 range: [0, 12] + x4.lb = 0.75( 2x0 + 3x1 ) - 0.75 * 15 = 1.5x0 + 2.25x1 - 11.25 + x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = 0.5x0 + 1.25x1 - 11.25 + x6.ub = 0.5x0 + 1.25x1 - 8.25 + + x6 range: [2 + 1.25 - 11.25 = -8, 3 + 6.25 - 8.25 = 1] = [-8, 1] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); + } + + void test_sbt_relus_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBT( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
+ nlr.setBias( 1, 0, -15 ); + + // However, one of the ReLU's variables has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive (set externally), bounds get zeroed + Second ReLU is active, bounds surive the activation + + x4.lb = 0 + x4.ub = 0 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); + } + + void test_sbt_abs_all_positive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 
0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Both absolute values positive, bound survive through activations: + + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ + + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, 
Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); + } + + void test_sbt_abs_positive_and_negative() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + 
tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is negative, bounds get flipped + Second absolute value is positive, bounds surive the activation + + x4.lb = -2x0 -3x1 + 30 : [3, 19] + x4.ub = -2x0 -3x1 + 30 : [3, 19] + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - 3x0 - 4x1 + 30 : [-8, 14] + x6.ub = - 3x0 - 4x1 + 30 : [-8, 14] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 19, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 14, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); + } + + void test_sbt_absolute_values_positive_and_not_fixed() + 
{ + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke SBT + 
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is undecided, bounds are concretized. + Second ReLU is active, bounds surive the activation + + x4 range: [0, 12] + x4.lb = 0 + x4.ub = 12 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 + 12 : [ 1, 7] + + x6 range: [-11, 7] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, 7, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); + } + + void test_sbt_absolute_values_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + 
nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
+ nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2 is eliminated, everything set to -3 + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Second absolute value is positive, bounds surive the activation + + x4: all set to 3 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 + 3 : [-8, -2] + x6.ub = - x0 - x1 + 3 : [-8, -2] + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + printf( "Dumpign discovered bounds:\n" ); + for ( const auto &bound : bounds ) + bound.dump(); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); + } + + void test_generate_input_query() + { + NLR::NetworkLevelReasoner nlr; + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Variable indexing + + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + + // Set the weights and biases for the weighted sum layers + + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -5 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 1, 1, 0 ); + nlr.setBias( 1, 2, 0 ); + + nlr.setBias( 3, 0, 0 ); + nlr.setBias( 3, 1, 2 ); + + nlr.setBias( 5, 0, 0 ); + nlr.setBias( 5, 1, 0 ); + + // Mark the ReLU/Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Start the testing + Query ipq; + nlr.generateQuery( ipq ); + List unhandledEquations; + Set varsInUnhandledConstraints; + TS_ASSERT( + ipq.constructNetworkLevelReasoner( unhandledEquations, varsInUnhandledConstraints ) ); + NLR::NetworkLevelReasoner *reconstructedNlr = ipq.getNetworkLevelReasoner(); + + 
double input[2]; + double output[2]; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); + + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + } + + void test_simulate_relus() + { + NLR::NetworkLevelReasoner nlr; + + populateNetwork( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With ReLUs, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With ReLUs, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 1 ) ); + } + + // With ReLUs, case 1 and 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, 2 ) ); + + 
TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 0 ) ); + } + } + + void test_simulate_sigmoids() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSigmoids( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // case 1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.6750, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 3.0167, + 0.0001 ) ); + } + + // case 2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.6032, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.5790, + 0.0001 ) ); + } + + // case 3 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned 
i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.5045, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.1957, + 0.0001 ) ); + } + } + + void test_simulate_non_consecutive_layers() + { + NLR::NetworkLevelReasoner nlr; + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + nlr.addLayerDependency( 0, 1 ); + nlr.addLayerDependency( 1, 2 ); + nlr.addLayerDependency( 2, 3 ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 3, 4 ); + nlr.addLayerDependency( 0, 4 ); + nlr.addLayerDependency( 4, 5 ); + + // Set the weights and relus + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, 2 ); + nlr.setWeight( 2, 2, 3, 1, -2 ); + nlr.setWeight( 0, 1, 3, 1, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 0, 0, 4, 2 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 2, 5, 0, 1 ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // Simulate1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1 ) ); + simulations1.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i 
= 0; i < simulationSize; ++i ) + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2 ) ); + + // Simulate2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, -1 ) ); + simulations2.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0 ) ); + } + + void test_simulate_relus_and_abs() + { + NLR::NetworkLevelReasoner nlr; + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -5 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the ReLU/Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // Simulate1 + Vector> 
simulations1; + simulations1.append( Vector( simulationSize, 1 ) ); + simulations1.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2 ) ); + } + + // Simulate2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 4 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + } + + void test_concretize_input_assignment() + { + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + + populateNetwork( nlr ); + + // With ReLUs, Inputs are zeros, only biases count + tableau.nextValues[0] = 0; + tableau.nextValues[1] = 0; + + Map assignment; + + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 4 ) ); + + TS_ASSERT( assignment.size() == 14 ); + TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( assignment[13], 4 ) ); + + // With ReLUs, case 1 + tableau.nextValues[0] = 1; + tableau.nextValues[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 
)->getAssignment( 0 ), 1 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 1 ) ); + + TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( assignment[13], 1 ) ); + + // With ReLUs, case 2 + tableau.nextValues[0] = 1; + tableau.nextValues[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 0 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 0 ) ); + + TS_ASSERT( FloatUtils::areEqual( assignment[12], 0 ) ); + TS_ASSERT( FloatUtils::areEqual( assignment[13], 0 ) ); + } + + + void test_obtain_bound_from_ipq() + { + NLR::NetworkLevelReasoner nlr; + populateNetwork( nlr ); + + Query query; + query.setNumberOfVariables( 14 ); + + + // Initialize the bounds + query.setLowerBound( 0, -1 ); + query.setUpperBound( 0, 1 ); + query.setLowerBound( 1, -1 ); + query.setUpperBound( 1, 1 ); + + double large = 1000; + query.setLowerBound( 2, -large ); + query.setUpperBound( 2, large ); + query.setLowerBound( 3, -large ); + query.setUpperBound( 3, large ); + query.setLowerBound( 4, -large ); + query.setUpperBound( 4, large ); + query.setLowerBound( 5, -large ); + query.setUpperBound( 5, large ); + query.setLowerBound( 6, -large ); + query.setUpperBound( 6, large ); + query.setLowerBound( 7, -large ); + query.setUpperBound( 7, large ); + query.setLowerBound( 8, -large ); + query.setUpperBound( 8, large ); + query.setLowerBound( 9, -large ); + query.setUpperBound( 9, large ); + query.setLowerBound( 10, -large ); + query.setUpperBound( 10, large ); + query.setLowerBound( 11, -large ); + query.setUpperBound( 11, large ); + query.setLowerBound( 12, -large ); + query.setUpperBound( 12, large ); + query.setLowerBound( 13, -large ); + query.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds( query ) ); + + // Perform the tightening pass + 
TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : expectedBounds ) + TS_ASSERT( bounds.exists( bound ) ); + } +}; From 04cc0b19e373462221aeac8fadaca10cb2f92a27 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Tue, 1 Oct 2024 22:58:53 +0300 Subject: [PATCH 02/72] Revert last change. --- Test_NetworkLevelReasoner.h | 3638 ----------------------------------- 1 file changed, 3638 deletions(-) delete mode 100644 Test_NetworkLevelReasoner.h diff --git a/Test_NetworkLevelReasoner.h b/Test_NetworkLevelReasoner.h deleted file mode 100644 index 5e98d7fb3a..0000000000 --- a/Test_NetworkLevelReasoner.h +++ /dev/null @@ -1,3638 +0,0 @@ -/********************* */ -/*! \file Test_NetworkLevelReasoner.h - ** \verbatim - ** Top contributors (to current version): - ** Guy Katz, Andrew Wu - ** This file is part of the Marabou project. 
- ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS - ** in the top-level source directory) and their institutional affiliations. - ** All rights reserved. See the file COPYING in the top-level source - ** directory for licensing information.\endverbatim - ** - ** [[ Add lengthier description here ]] - -**/ - -#include "../../engine/tests/MockTableau.h" // TODO: fix this -#include "FloatUtils.h" -#include "Layer.h" -#include "NetworkLevelReasoner.h" -#include "Options.h" -#include "Query.h" -#include "Tightening.h" -#include "Vector.h" - -#include - -class MockForNetworkLevelReasoner -{ -public: -}; - -class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -{ -public: - MockForNetworkLevelReasoner *mock; - - void setUp() - { - TS_ASSERT( mock = new MockForNetworkLevelReasoner ); - } - - void tearDown() - { - TS_ASSERT_THROWS_NOTHING( delete mock ); - } - - void populateNetwork( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the ReLU sources - 
nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithSigmoids( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::SIGMOID, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 
1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithAbs( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - 
nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithSign( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 
2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithRound( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ROUND, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - 
nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Round sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithLeakyRelu( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - 
nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - nlr.getLayer( 2 )->setAlpha( 0.1 ); - nlr.getLayer( 4 )->setAlpha( 0.1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the LeakyReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - - - void populateNetworkWithMax( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - 
x b e - g - y c f - d - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::MAX, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::MAX, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, -2 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 2 ); - nlr.setWeight( 0, 1, 1, 3, -3 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 0, 2 ); - - // Mark the Max sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 3, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - } - - void populateNetworkWithSoftmax( NLR::NetworkLevelReasoner &nlr ) - { - 
/* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SOFTMAX, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Softmax sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 0, 4, 1 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 
5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithBilinear( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x b e - g - y c f - d - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::BILINEAR, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, -2 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 2 ); - nlr.setWeight( 0, 1, 1, 3, -3 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 0, 2 ); - - // Mark the Bilinear sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 3, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( 
NLR::NeuronIndex( 1, 2 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - } - - void populateNetworkWithAbsAndRelu( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -5 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Round/Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( 
NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithRoundAndSign( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ROUND, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Round/Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( 
NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithLeakyReluAndSigmoid( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - nlr.getLayer( 2 )->setAlpha( 0.1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the LeakyReLU/Sigmoid sources - nlr.addActivationSource( 1, 0, 
2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d - b f - y e - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::MAX, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 
); - nlr.setBias( 3, 1, 2 ); - - // Mark the Softmax/Max sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - } - - - void populateNetworkWithReluAndBilinear( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d - b f - y e - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); 
- nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the ReLU/Bilinear sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - } - - void test_evaluate_relu() - { - NLR::NetworkLevelReasoner nlr; - - populateNetwork( nlr ); - - double input[2]; - double output[2]; - - // With ReLUs, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With ReLUs, case 1 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 1 ) ); - - // With ReLUs, case 2 - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( 
FloatUtils::areEqual( output[0], 0 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 0 ) ); - } - - void test_evaluate_sigmoids() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSigmoids( nlr ); - - double input[2]; - double output[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0.6750, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 3.0167, 0.0001 ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0.6032, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.5790, 0.0001 ) ); - - // case 3 - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0.5045, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.1957, 0.0001 ) ); - } - - void test_evaluate_abs() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithAbs( nlr ); - - double input[2]; - double output[2]; - - // With Abs, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With Abs, case 1 - input[0] = -2; - input[1] = -2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With Abs, case 2 - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 10 ) ); - } - - void test_evaluate_sign() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSign( nlr ); - - double input[2]; - double output[2]; - - // With 
Sign, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With Sign, case 1 - input[0] = -2; - input[1] = -2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With Sign, case 2 (0 considered "non-negative", sign(0)=1) - input[0] = -1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -4 ) ); - } - - void test_evaluate_round() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithRound( nlr ); - - double input[2]; - double output[2]; - - // With Round, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With Round, case 1 - input[0] = 2.1; - input[1] = 1.4; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 2, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); - - // With Round, case 2 - input[0] = 2.1; - input[1] = 1.6; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -12, 0.0001 ) ); - } - - void test_evaluate_leaky_relu() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithLeakyRelu( nlr ); - - double input[2]; - double output[2]; - - // With Leaky ReLU, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( 
FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With Leaky ReLU, case 1 (alpha=0.1) - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0.9, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 0.57, 0.0001 ) ); - - // With Leaky ReLU, case 2 - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -0.04, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -0.76, 0.0001 ) ); - } - - void test_evaluate_max() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithMax( nlr ); - - double input[2]; - double output[1]; - - // With Max, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -3 ) ); - - // With Max, case 1 - input[0] = 1; - input[1] = -3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -18 ) ); - - // With Max, case 2 - input[0] = -3; - input[1] = 3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -5 ) ); - } - - - void test_evaluate_softmax() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSoftmax( nlr ); - - double input[2]; - double output[2]; - - // With Softmax, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.2999, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.4001, 0.0001 ) ); - - // With Softmax, case 1 - input[0] = 1; - input[1] = -3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.1192, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 
2.7615, 0.0001 ) ); - - // With Softmax, case 2 - input[0] = -3; - input[1] = 3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.1206, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.7588, 0.0001 ) ); - } - - void test_evaluate_bilinear() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithBilinear( nlr ); - - double input[2]; - double output[1]; - - // With Bilinear, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); - - // With Bilinear, case 1 - input[0] = 0.1; - input[1] = -0.3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 2.8304, 0.0001 ) ); - - // With Bilinear, case 2 - input[0] = -0.3; - input[1] = 0.3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.0912, 0.0001 ) ); - } - - void test_evaluate_non_consecutive_layers() - { - NLR::NetworkLevelReasoner nlr; - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - nlr.addLayerDependency( 0, 1 ); - nlr.addLayerDependency( 1, 2 ); - nlr.addLayerDependency( 2, 3 ); - nlr.addLayerDependency( 0, 3 ); - nlr.addLayerDependency( 3, 4 ); - nlr.addLayerDependency( 0, 4 ); - nlr.addLayerDependency( 4, 5 ); - - // Set the weights and relus - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - 
nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, 2 ); - nlr.setWeight( 2, 2, 3, 1, -2 ); - nlr.setWeight( 0, 1, 3, 1, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 0, 0, 4, 2 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 2, 5, 0, 1 ); - - // Evaluate - double input[2]; - double output; - - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); - TS_ASSERT( FloatUtils::areEqual( output, 2 ) ); - - input[0] = -1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); - TS_ASSERT( FloatUtils::areEqual( output, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.computeSuccessorLayers() ); - - TS_ASSERT_EQUALS( nlr.getLayer( 0 )->getSuccessorLayers(), Set( { 1, 3, 4 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 1 )->getSuccessorLayers(), Set( { 2 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 2 )->getSuccessorLayers(), Set( { 3 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 3 )->getSuccessorLayers(), Set( { 4 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getSuccessorLayers(), Set( { 5 } ) ); - } - - void test_evaluate_abs_and_relu() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithAbsAndRelu( nlr ); - - double input[2]; - double output[2]; - - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); - - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - } - - void test_evaluate_round_and_sign() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithRoundAndSign( nlr ); - - double input[2]; - double output[2]; - - input[0] = 1.6; - input[1] = 1.4; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, 
output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -2, 0.0001 ) ); - - input[0] = 1.6; - input[1] = 1.6; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -1, 0.0001) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); - } - - void test_evaluate_leaky_relu_and_sigmoid() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithLeakyReluAndSigmoid( nlr ); - - double input[2]; - double output[2]; - - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0.7109, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 1.4602, 0.0001 ) ); - - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0.4013, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 0.6508, 0.0001 ) ); - } - - void test_evaluate_softmax_and_max() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSoftmaxAndMax( nlr ); - - double input[2]; - double output[1]; - - input[0] = 1; - input[1] = -3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -2.9998, 0.0001 ) ); - - input[0] = -3; - input[1] = 3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -1.0000, 0.0001 ) ); - } - - void test_evaluate_relu_and_bilinear() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithReluAndBilinear( nlr ); - - double input[2]; - double output[1]; - - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); - } - - void 
test_store_into_other() - { - NLR::NetworkLevelReasoner nlr; - - populateNetwork( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - // With ReLUs, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // With ReLUs, case 1 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_sigmoids() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSigmoids( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - 
TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_round() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithRound( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_sign() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSign( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_abs() 
- { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithAbs( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_leaky_relu() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithLeakyRelu( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_max() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithMax( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double 
output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_softmax() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSoftmax( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_bilinear() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithBilinear( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( 
output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_interval_arithmetic_bound_propagation_relu_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetwork( nlr ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, 
Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : expectedBounds ) - TS_ASSERT( bounds.exists( bound ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - 
tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT_EQUALS( expectedBounds2.size(), bounds.size() ); - for ( const auto &bound : expectedBounds2 ) - TS_ASSERT( bounds.exists( bound ) ); - } - - void test_interval_arithmetic_bound_propagation_abs_constraints() - { - NLR::NetworkLevelReasoner nlr; - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer 
dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Layer dependenices - nlr.addLayerDependency( 0, 1 ); - nlr.addLayerDependency( 1, 2 ); - nlr.addLayerDependency( 2, 3 ); - nlr.addLayerDependency( 3, 4 ); - nlr.addLayerDependency( 4, 5 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize 
the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 8, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), - - Tightening( 10, -3, Tightening::LB ), Tightening( 10, 10, Tightening::UB 
), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 41, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : expectedBounds ) - TS_ASSERT( bounds.exists( bound ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - 
Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT_EQUALS( expectedBounds2.size(), bounds.size() ); - for ( const auto &bound : expectedBounds2 ) - TS_ASSERT( bounds.exists( bound ) ); - } - - void populateNetworkSBT( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - 2 R 1 - x0 --- x2 ---> x4 --- x6 - \ / / - 1 \ / / - \/ -1 / - /\ / - 3 / \ / - / \ R / - x1 --- x3 ---> x5 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( 
NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 7 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - } - - void test_sbt_relus_all_active() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBT( nlr, tableau ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 : [11, 27] - x2.ub = 2x0 + 3x1 : [11, 27] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - Both ReLUs active, bound survive through activations: - - x4.lb = 2x0 + 3x1 : [11, 27] - x4.ub = 2x0 + 3x1 : [11, 27] - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = x0 + 2x1 : [6, 16] - x6.ub = x0 + 2x1 : [6, 16] - */ - - List expectedBounds( { - Tightening( 2, 11, Tightening::LB ), - Tightening( 2, 27, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 11, Tightening::LB ), - Tightening( 4, 27, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, 
Tightening::UB ), - - Tightening( 6, 6, Tightening::LB ), - Tightening( 6, 16, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : expectedBounds ) - TS_ASSERT( bounds.exists( bound ) ); - } - - void test_sbt_relus_active_and_inactive() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBT( nlr, tableau ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -30 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 30 : [-19, -3] - x2.ub = 2x0 + 3x1 - 30 : [-19, -3] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First ReLU is inactive, bounds get zeroed - Second ReLU is active, bounds surive the activation - - x4.lb = 0 - x4.ub = 0 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 : [-11, -5] - */ - - List expectedBounds( { - Tightening( 2, -19, Tightening::LB ), - Tightening( 2, -3, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 0, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, -5, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( 
const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } - - void test_sbt_relus_active_and_not_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBT( nlr, tableau ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -15 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First ReLU is undecided, bound is concretized. - Coefficient: 12/(12--4) = 12/16 = 0.75 - Second ReLU is active, bounds surive the activation - - x4 range: [0, 12] - x4.lb = 0.75( 2x0 + 3x1 ) - 0.75 * 15 = 1.5x0 + 2.25x1 - 11.25 - x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = 0.5x0 + 1.25x1 - 11.25 - x6.ub = 0.5x0 + 1.25x1 - 8.25 - - x6 range: [2 + 1.25 - 11.25 = -8, 3 + 6.25 - 8.25 = 1] = [-8, 1] - */ - - List expectedBounds( { - Tightening( 2, -4, Tightening::LB ), - Tightening( 2, 12, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 12, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, 1, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound 
: bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } - - void test_sbt_relus_active_and_externally_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBT( nlr, tableau ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. - nlr.setBias( 1, 0, -15 ); - - // However, one of the ReLU's variables has been eliminated - nlr.eliminateVariable( 2, -3 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First ReLU is inactive (set externally), bounds get zeroed - Second ReLU is active, bounds surive the activation - - x4.lb = 0 - x4.ub = 0 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 : [-11, -5] - */ - - List expectedBounds( { - // x2 does not appear, because it has been eliminated - - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 0, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, -5, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } - - void test_sbt_abs_all_positive() - { - 
Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( 
nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 : [11, 27] - x2.ub = 2x0 + 3x1 : [11, 27] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - Both absolute values positive, bound survive through activations: - - x4.lb = 2x0 + 3x1 : [11, 27] - x4.ub = 2x0 + 3x1 : [11, 27] - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = x0 + 2x1 : [6, 16] - x6.ub = x0 + 2x1 : [6, 16] - */ - - List expectedBounds( { - Tightening( 2, 11, Tightening::LB ), - Tightening( 2, 27, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 11, Tightening::LB ), - Tightening( 4, 27, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, 6, Tightening::LB ), - Tightening( 6, 16, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } - - void test_sbt_abs_positive_and_negative() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the ReLU sources - 
nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -30 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 30 : [-19, -3] - x2.ub = 2x0 + 3x1 - 30 : [-19, -3] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First absolute value is negative, bounds get flipped - Second absolute value is positive, bounds surive the activation - - x4.lb = -2x0 -3x1 + 30 : [3, 19] - x4.ub = -2x0 -3x1 + 30 : [3, 19] - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - 3x0 - 4x1 + 30 : [-8, 14] - x6.ub = - 3x0 - 4x1 + 30 : [-8, 14] - */ - - List expectedBounds( { - Tightening( 2, -19, Tightening::LB ), - Tightening( 2, -3, Tightening::UB ), - Tightening( 3, 5, Tightening::LB 
), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 19, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, 14, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } - - void test_sbt_absolute_values_positive_and_not_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - 
tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -15 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First absolute value is undecided, bounds are concretized. - Second ReLU is active, bounds surive the activation - - x4 range: [0, 12] - x4.lb = 0 - x4.ub = 12 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 + 12 : [ 1, 7] - - x6 range: [-11, 7] - */ - - List expectedBounds( { - Tightening( 2, -4, Tightening::LB ), - Tightening( 2, 12, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 12, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, 7, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } - - void test_sbt_absolute_values_active_and_externally_fixed() - { - Options::get()->setString( 
Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
- nlr.setBias( 1, 0, -15 ); - - // However, the weighted sum variable has been eliminated - nlr.eliminateVariable( 2, -3 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2 is eliminated, everything set to -3 - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - Second absolute value is positive, bounds surive the activation - - x4: all set to 3 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 + 3 : [-8, -2] - x6.ub = - x0 - x1 + 3 : [-8, -2] - */ - - List expectedBounds( { - // x2 does not appear, because it has been eliminated - - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, -2, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - printf( "Dumpign discovered bounds:\n" ); - for ( const auto &bound : bounds ) - bound.dump(); - - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } - - void test_generate_input_query() - { - NLR::NetworkLevelReasoner nlr; - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Variable indexing - - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - 
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - - // Set the weights and biases for the weighted sum layers - - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -5 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 1, 1, 0 ); - nlr.setBias( 1, 2, 0 ); - - nlr.setBias( 3, 0, 0 ); - nlr.setBias( 3, 1, 2 ); - - nlr.setBias( 5, 0, 0 ); - nlr.setBias( 5, 1, 0 ); - - // Mark the ReLU/Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Start the testing - Query ipq; - nlr.generateQuery( ipq ); - List unhandledEquations; - Set varsInUnhandledConstraints; - TS_ASSERT( - ipq.constructNetworkLevelReasoner( unhandledEquations, varsInUnhandledConstraints ) ); - NLR::NetworkLevelReasoner *reconstructedNlr = ipq.getNetworkLevelReasoner(); - - 
double input[2]; - double output[2]; - - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); - - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - } - - void test_simulate_relus() - { - NLR::NetworkLevelReasoner nlr; - - populateNetwork( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With ReLUs, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - - // With ReLUs, case 1 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 1 ) ); - } - - // With ReLUs, case 1 and 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, 1 ) ); - simulations3.append( Vector( simulationSize, 2 ) ); - - 
TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 0 ) ); - } - } - - void test_simulate_sigmoids() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSigmoids( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // case 1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.6750, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 3.0167, - 0.0001 ) ); - } - - // case 2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.6032, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.5790, - 0.0001 ) ); - } - - // case 3 - Vector> simulations3; - simulations3.append( Vector( simulationSize, 1 ) ); - simulations3.append( Vector( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned 
i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.5045, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.1957, - 0.0001 ) ); - } - } - - void test_simulate_non_consecutive_layers() - { - NLR::NetworkLevelReasoner nlr; - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - nlr.addLayerDependency( 0, 1 ); - nlr.addLayerDependency( 1, 2 ); - nlr.addLayerDependency( 2, 3 ); - nlr.addLayerDependency( 0, 3 ); - nlr.addLayerDependency( 3, 4 ); - nlr.addLayerDependency( 0, 4 ); - nlr.addLayerDependency( 4, 5 ); - - // Set the weights and relus - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, 2 ); - nlr.setWeight( 2, 2, 3, 1, -2 ); - nlr.setWeight( 0, 1, 3, 1, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 0, 0, 4, 2 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 2, 5, 0, 1 ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // Simulate1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1 ) ); - simulations1.append( Vector( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i 
= 0; i < simulationSize; ++i ) - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 2 ) ); - - // Simulate2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, -1 ) ); - simulations2.append( Vector( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); - } - - void test_simulate_relus_and_abs() - { - NLR::NetworkLevelReasoner nlr; - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -5 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the ReLU/Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // Simulate1 - Vector> 
simulations1; - simulations1.append( Vector( simulationSize, 1 ) ); - simulations1.append( Vector( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 2 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2 ) ); - } - - // Simulate2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 4 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - } - - void test_concretize_input_assignment() - { - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - - populateNetwork( nlr ); - - // With ReLUs, Inputs are zeros, only biases count - tableau.nextValues[0] = 0; - tableau.nextValues[1] = 0; - - Map assignment; - - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); - - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 4 ) ); - - TS_ASSERT( assignment.size() == 14 ); - TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( assignment[13], 4 ) ); - - // With ReLUs, case 1 - tableau.nextValues[0] = 1; - tableau.nextValues[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); - - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 
)->getAssignment( 0 ), 1 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 1 ) ); - - TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( assignment[13], 1 ) ); - - // With ReLUs, case 2 - tableau.nextValues[0] = 1; - tableau.nextValues[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); - - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 0 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 0 ) ); - - TS_ASSERT( FloatUtils::areEqual( assignment[12], 0 ) ); - TS_ASSERT( FloatUtils::areEqual( assignment[13], 0 ) ); - } - - - void test_obtain_bound_from_ipq() - { - NLR::NetworkLevelReasoner nlr; - populateNetwork( nlr ); - - Query query; - query.setNumberOfVariables( 14 ); - - - // Initialize the bounds - query.setLowerBound( 0, -1 ); - query.setUpperBound( 0, 1 ); - query.setLowerBound( 1, -1 ); - query.setUpperBound( 1, 1 ); - - double large = 1000; - query.setLowerBound( 2, -large ); - query.setUpperBound( 2, large ); - query.setLowerBound( 3, -large ); - query.setUpperBound( 3, large ); - query.setLowerBound( 4, -large ); - query.setUpperBound( 4, large ); - query.setLowerBound( 5, -large ); - query.setUpperBound( 5, large ); - query.setLowerBound( 6, -large ); - query.setUpperBound( 6, large ); - query.setLowerBound( 7, -large ); - query.setUpperBound( 7, large ); - query.setLowerBound( 8, -large ); - query.setUpperBound( 8, large ); - query.setLowerBound( 9, -large ); - query.setUpperBound( 9, large ); - query.setLowerBound( 10, -large ); - query.setUpperBound( 10, large ); - query.setLowerBound( 11, -large ); - query.setUpperBound( 11, large ); - query.setLowerBound( 12, -large ); - query.setUpperBound( 12, large ); - query.setLowerBound( 13, -large ); - query.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds( query ) ); - - // Perform the tightening pass - 
TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : expectedBounds ) - TS_ASSERT( bounds.exists( bound ) ); - } -}; From 80be6dc2f9b36113fec67121020124eb25428ea3 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Tue, 1 Oct 2024 23:01:33 +0300 Subject: [PATCH 03/72] Added assignment, store-into-other NLR tests for every activation function. 
--- src/nlr/tests/Test_NetworkLevelReasoner.h | 1720 +++++++++++++++++++-- 1 file changed, 1574 insertions(+), 146 deletions(-) diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index 8afb4e0328..5e98d7fb3a 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -187,219 +187,1624 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); } + + void populateNetworkWithAbs( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithSign( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + 
nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithRound( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + 
// Mark the Round sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithLeakyRelu( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 
3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + + + void populateNetworkWithMax( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x b e + g + y c f + d + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::MAX, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::MAX, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -2 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + 
nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 2 ); + nlr.setWeight( 0, 1, 1, 3, -3 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 0, 2 ); + + // Mark the Max sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + } + + void populateNetworkWithSoftmax( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SOFTMAX, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 
1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Softmax sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 0, 4, 1 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithBilinear( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x b e + g + y c f + d + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + 
nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::BILINEAR, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -2 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 2 ); + nlr.setWeight( 0, 1, 1, 3, -3 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 0, 2 ); + + // Mark the Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + } + + void populateNetworkWithAbsAndRelu( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, 
NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -5 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Abs/ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 
13 ); + } + + void populateNetworkWithRoundAndSign( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Round/Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithLeakyReluAndSigmoid( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the LeakyReLU/Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d + b f + y e + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::MAX, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Softmax/Max sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); 
+ nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + } + + + void populateNetworkWithReluAndBilinear( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d + b f + y e + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the ReLU/Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + } + + void test_evaluate_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetwork( nlr ); + + double input[2]; + double output[2]; + + // With ReLUs, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With ReLUs, case 1 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 1 ) ); + + // With ReLUs, case 2 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0 ) ); + } + + void test_evaluate_sigmoids() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSigmoids( nlr ); + + double input[2]; + double output[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.6750, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 3.0167, 0.0001 ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + 
TS_ASSERT( FloatUtils::areEqual( output[0], 0.6032, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.5790, 0.0001 ) ); + + // case 3 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.5045, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.1957, 0.0001 ) ); + } + + void test_evaluate_abs() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbs( nlr ); + + double input[2]; + double output[2]; + + // With Abs, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Abs, case 1 + input[0] = -2; + input[1] = -2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Abs, case 2 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 10 ) ); + } + + void test_evaluate_sign() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSign( nlr ); + + double input[2]; + double output[2]; + + // With Sign, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Sign, case 1 + input[0] = -2; + input[1] = -2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Sign, case 2 (0 considered "non-negative", sign(0)=1) + input[0] = -1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( 
nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4 ) ); + } + + void test_evaluate_round() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithRound( nlr ); + + double input[2]; + double output[2]; + + // With Round, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Round, case 1 + input[0] = 2.1; + input[1] = 1.4; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 2, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); + + // With Round, case 2 + input[0] = 2.1; + input[1] = 1.6; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -12, 0.0001 ) ); + } + + void test_evaluate_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithLeakyRelu( nlr ); + + double input[2]; + double output[2]; + + // With Leaky ReLU, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Leaky ReLU, case 1 (alpha=0.1) + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.9, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0.57, 0.0001 ) ); + + // With Leaky ReLU, case 2 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -0.04, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -0.76, 0.0001 ) ); 
+ } + + void test_evaluate_max() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithMax( nlr ); - void test_evaluate_relus() + double input[2]; + double output[1]; + + // With Max, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -3 ) ); + + // With Max, case 1 + input[0] = 1; + input[1] = -3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -18 ) ); + + // With Max, case 2 + input[0] = -3; + input[1] = 3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -5 ) ); + } + + + void test_evaluate_softmax() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmax( nlr ); + + double input[2]; + double output[2]; + + // With Softmax, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.2999, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.4001, 0.0001 ) ); + + // With Softmax, case 1 + input[0] = 1; + input[1] = -3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.7615, 0.0001 ) ); + + // With Softmax, case 2 + input[0] = -3; + input[1] = 3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.1206, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.7588, 0.0001 ) ); + } + + void test_evaluate_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithBilinear( nlr ); + + double input[2]; + double output[1]; + + // With Bilinear, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + 
TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + + // With Bilinear, case 1 + input[0] = 0.1; + input[1] = -0.3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 2.8304, 0.0001 ) ); + + // With Bilinear, case 2 + input[0] = -0.3; + input[1] = 0.3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.0912, 0.0001 ) ); + } + + void test_evaluate_non_consecutive_layers() + { + NLR::NetworkLevelReasoner nlr; + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + nlr.addLayerDependency( 0, 1 ); + nlr.addLayerDependency( 1, 2 ); + nlr.addLayerDependency( 2, 3 ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 3, 4 ); + nlr.addLayerDependency( 0, 4 ); + nlr.addLayerDependency( 4, 5 ); + + // Set the weights and relus + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, 2 ); + nlr.setWeight( 2, 2, 3, 1, -2 ); + nlr.setWeight( 0, 1, 3, 1, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 0, 0, 4, 2 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 2, 5, 0, 1 ); + + // Evaluate + double input[2]; + double output; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); + TS_ASSERT( FloatUtils::areEqual( output, 2 ) ); + + input[0] = -1; + input[1] = 2; 
+ + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); + TS_ASSERT( FloatUtils::areEqual( output, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.computeSuccessorLayers() ); + + TS_ASSERT_EQUALS( nlr.getLayer( 0 )->getSuccessorLayers(), Set( { 1, 3, 4 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 1 )->getSuccessorLayers(), Set( { 2 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 2 )->getSuccessorLayers(), Set( { 3 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 3 )->getSuccessorLayers(), Set( { 4 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getSuccessorLayers(), Set( { 5 } ) ); + } + + void test_evaluate_abs_and_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbsAndRelu( nlr ); + + double input[2]; + double output[2]; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); + + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + } + + void test_evaluate_round_and_sign() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithRoundAndSign( nlr ); + + double input[2]; + double output[2]; + + input[0] = 1.6; + input[1] = 1.4; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -2, 0.0001 ) ); + + input[0] = 1.6; + input[1] = 1.6; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -1, 0.0001) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); + } + + void test_evaluate_leaky_relu_and_sigmoid() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithLeakyReluAndSigmoid( nlr ); + + double input[2]; + double output[2]; + + input[0] = 1; + input[1] = 1; + + 
TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.7109, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 1.4602, 0.0001 ) ); + + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.4013, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0.6508, 0.0001 ) ); + } + + void test_evaluate_softmax_and_max() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmaxAndMax( nlr ); + + double input[2]; + double output[1]; + + input[0] = 1; + input[1] = -3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -2.9998, 0.0001 ) ); + + input[0] = -3; + input[1] = 3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -1.0000, 0.0001 ) ); + } + + void test_evaluate_relu_and_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithReluAndBilinear( nlr ); + + double input[2]; + double output[1]; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + } + + void test_store_into_other() + { + NLR::NetworkLevelReasoner nlr; + + populateNetwork( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + 
TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + // With ReLUs, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // With ReLUs, case 1 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_sigmoids() { NLR::NetworkLevelReasoner nlr; - populateNetwork( nlr ); + populateNetworkWithSigmoids( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); double input[2]; - double output[2]; + double output1[2]; + double output2[2]; - // With ReLUs, Inputs are zeros, only biases count + // case 1 input[0] = 0; input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - // With ReLUs, case 1 + // case 2 input[0] = 1; input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 1 ) ); - - // With ReLUs, case 2 - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( 
nlr2.evaluate( input, output2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 0 ) ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - - void test_evaluate_sigmoids() + + void test_store_into_other_with_round() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithSigmoids( nlr ); + populateNetworkWithRound( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); double input[2]; - double output[2]; + double output1[2]; + double output2[2]; // case 1 input[0] = 0; input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.6750, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 3.0167, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); // case 2 input[0] = 1; input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0.6032, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.5790, 0.0001 ) ); - - // case 3 - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.5045, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.1957, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - - void test_evaluate_non_consecutive_layers() + + void test_store_into_other_with_sign() { NLR::NetworkLevelReasoner nlr; - // Create the layers - nlr.addLayer( 
0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - nlr.addLayerDependency( 0, 1 ); - nlr.addLayerDependency( 1, 2 ); - nlr.addLayerDependency( 2, 3 ); - nlr.addLayerDependency( 0, 3 ); - nlr.addLayerDependency( 3, 4 ); - nlr.addLayerDependency( 0, 4 ); - nlr.addLayerDependency( 4, 5 ); + populateNetworkWithSign( nlr ); - // Set the weights and relus - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); + NLR::NetworkLevelReasoner nlr2; - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, 2 ); - nlr.setWeight( 2, 2, 3, 1, -2 ); - nlr.setWeight( 0, 1, 3, 1, 1 ); + double input[2]; + double output1[2]; + double output2[2]; - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 0, 0, 4, 2 ); + // case 1 + input[0] = 0; + input[1] = 0; - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 2, 5, 0, 1 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - // Evaluate - double input[2]; - double output; + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + // case 2 input[0] = 1; input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); - TS_ASSERT( FloatUtils::areEqual( output, 2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - 
input[0] = -1; - input[1] = 2; + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_abs() + { + NLR::NetworkLevelReasoner nlr; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); - TS_ASSERT( FloatUtils::areEqual( output, 0 ) ); + populateNetworkWithAbs( nlr ); - TS_ASSERT_THROWS_NOTHING( nlr.computeSuccessorLayers() ); + NLR::NetworkLevelReasoner nlr2; - TS_ASSERT_EQUALS( nlr.getLayer( 0 )->getSuccessorLayers(), Set( { 1, 3, 4 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 1 )->getSuccessorLayers(), Set( { 2 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 2 )->getSuccessorLayers(), Set( { 3 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 3 )->getSuccessorLayers(), Set( { 4 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getSuccessorLayers(), Set( { 5 } ) ); - } + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - void test_evaluate_relus_and_abs() - { - NLR::NetworkLevelReasoner nlr; + double input[2]; + double output1[2]; + double output2[2]; - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + // case 1 + input[0] = 0; + input[1] = 0; - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - nlr.setWeight( 2, 0, 3, 
0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -5 ); + // case 2 + input[0] = 1; + input[1] = 1; - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; - // Mark the ReLU/Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); + populateNetworkWithLeakyRelu( nlr ); - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); double input[2]; - double output[2]; + double output1[2]; + double output2[2]; - input[0] = 1; - input[1] = 1; + // case 1 + input[0] = 0; + input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + // case 2 input[0] = 1; - input[1] = 2; + input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); - TS_ASSERT( FloatUtils::areEqual( 
output[1], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - - void test_store_into_other() + + void test_store_into_other_with_max() { NLR::NetworkLevelReasoner nlr; - populateNetwork( nlr ); + populateNetworkWithMax( nlr ); NLR::NetworkLevelReasoner nlr2; @@ -409,7 +1814,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite double output1[2]; double output2[2]; - // Inputs are zeros, only biases count + // case 1 input[0] = 0; input[1] = 0; @@ -419,9 +1824,32 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_softmax() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmax( nlr ); + + NLR::NetworkLevelReasoner nlr2; + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - // With ReLUs, Inputs are zeros, only biases count + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 input[0] = 0; input[1] = 0; @@ -431,7 +1859,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - // With ReLUs, case 1 + // case 2 input[0] = 1; input[1] = 1; @@ -441,12 +1869,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - - void test_store_into_other_with_sigmoids() + + void 
test_store_into_other_with_bilinear() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithSigmoids( nlr ); + populateNetworkWithBilinear( nlr ); NLR::NetworkLevelReasoner nlr2; @@ -476,7 +1904,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - + void test_interval_arithmetic_bound_propagation_relu_constraints() { NLR::NetworkLevelReasoner nlr; From 9fbca7be1144040c6817e25564a7e8a723364306 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Tue, 1 Oct 2024 23:05:59 +0300 Subject: [PATCH 04/72] Add files via upload 0-2-0: Implementing layer simulations for every activation function. --- src/nlr/Layer.cpp | 57 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index 01b6a6aaf6..f7dd40b563 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -399,6 +399,63 @@ void Layer::computeSimulations() _simulations[i][j] = 1 / ( 1 + std::exp( -simulations.get( j ) ) ); } } + else if ( _type == ROUND ) + { + for ( unsigned i = 0; i < _size; ++i ) + { + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Vector &simulations = + ( *( _layerOwner->getLayer( sourceIndex._layer )->getSimulations() ) ).get( i ); + for ( unsigned j = 0; j < simulationSize; ++j ) + _simulations[i][j] = FloatUtils::round( simulations.get( j ) ); + } + } + else if ( _type == SOFTMAX ) + { + for ( unsigned i = 0; i < _size; ++i ) + { + for ( unsigned j = 0; j < simulationSize; ++j ) + { + _simulations[i][j] = FloatUtils::negativeInfinity(); + + Vector inputs; + Vector outputs; + unsigned outputIndex = 0; + unsigned index = 0; + + for ( const auto &input : _neuronToActivationSources[i] ) + { + if ( input._neuron == i ) + outputIndex = index; + double value = ( *( _layerOwner->getLayer( input._layer )->getSimulations() ) ) + .get( input._neuron ) + .get( j ); + 
inputs.append( value ); + ++index; + } + + SoftmaxConstraint::softmax( inputs, outputs ); + _simulations[i][j] = outputs[outputIndex]; + } + } + } + else if ( _type == BILINEAR ) + { + for ( unsigned i = 0; i < _size; ++i ) + { + for ( unsigned j = 0; j < simulationSize; ++j ) + { + _simulations[i][j] = 1; + for ( const auto &input : _neuronToActivationSources[i] ) + { + double value = ( *( _layerOwner->getLayer( input._layer )->getSimulations() ) ) + .get( input._neuron ) + .get( j ); + _simulations[i][j] *= value; + } + } + } + } else { printf( "Error! Neuron type %u unsupported\n", _type ); From f7aee6d5832f2e84892f5623b7abb22686bcb79d Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Tue, 1 Oct 2024 23:08:19 +0300 Subject: [PATCH 05/72] Add files via upload 0-2-1: Testing layer simulations for every activation function. --- src/nlr/tests/Test_NetworkLevelReasoner.h | 722 ++++++++++++++++++++-- 1 file changed, 682 insertions(+), 40 deletions(-) diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index 5e98d7fb3a..96e698c7ac 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -3198,7 +3198,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); } - void test_simulate_relus() + void test_simulate_relu() { NLR::NetworkLevelReasoner nlr; @@ -3347,6 +3347,496 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 0.0001 ) ); } } + + void test_simulate_round() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithRound( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Round, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < 
simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Round, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 2.1 ) ); + simulations2.append( Vector( simulationSize, 1.4 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -4, + 0.0001 ) ); + } + + // With Round, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 2.1 ) ); + simulations3.append( Vector( simulationSize, 1.6 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -12, + 0.0001 ) ); + } + } + + void test_simulate_sign() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSign( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Sign, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + 
TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Sign, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, -2 ) ); + simulations2.append( Vector( simulationSize, -2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Sign, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, -1 ) ); + simulations3.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -4 ) ); + } + } + + void test_simulate_abs() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbs( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Abs, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 
)->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Abs, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, -2 ) ); + simulations2.append( Vector( simulationSize, -2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Abs, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 4 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 10 ) ); + } + } + + void test_simulate_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithLeakyRelu( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Leaky ReLU, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + 
TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Leaky ReLU, case 1 (alpha=0.1) + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.9, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 0.57, + 0.0001 ) ); + } + + // With Leaky ReLU, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -0.04, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -0.76, + 0.0001 ) ); + } + } + + void test_simulate_max() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithMax( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Max, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -3 ) ); + } + + // 
With Max, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, -3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -18 ) ); + } + + // With Max, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, -3 ) ); + simulations3.append( Vector( simulationSize, 3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -5 ) ); + } + } + + void test_simulate_softmax() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmax( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Softmax, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.2999, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.4001, + 0.0001 ) ); + } + + // With Softmax, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, -3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( 
nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.1192, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.7615, + 0.0001 ) ); + } + + // With Softmax, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, -3 ) ); + simulations3.append( Vector( simulationSize, 3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.1206, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.7588, + 0.0001 ) ); + } + } + + void test_simulate_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithBilinear( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Bilinear, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0 ) ); + } + + // With Bilinear, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 0.1 ) ); + simulations2.append( Vector( simulationSize, -0.3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2.8304, + 0.0001 ) ); + } + + // With Bilinear, 
case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, -0.3 ) ); + simulations3.append( Vector( simulationSize, 0.3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.0912, + 0.0001 ) ); + } + } void test_simulate_non_consecutive_layers() { @@ -3423,53 +3913,117 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 0 ) ); } - void test_simulate_relus_and_abs() + void test_simulate_abs_and_relu() { NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbsAndRelu( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + // Simulate1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1 ) ); + simulations1.append( Vector( simulationSize, 1 ) ); - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2 ) ); + } - nlr.setWeight( 2, 0, 3, 0, 
1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -5 ); + // Simulate2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 2 ) ); - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 4 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + } + + void test_simulate_round_and_sign() + { + NLR::NetworkLevelReasoner nlr; - // Mark the ReLU/Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); + populateNetworkWithRoundAndSign( nlr ); - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Round/Sign, case 1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1.6 ) ); + simulations1.append( Vector( simulationSize, 1.4 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -2 ) ); + } + + // With Round/Sign, case 2 + Vector> simulations2; + simulations2.append( Vector( 
simulationSize, 1.6 ) ); + simulations2.append( Vector( simulationSize, 1.6 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -4 ) ); + } + } + + void test_simulate_leaky_relu_and_sigmoid() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithLeakyReluAndSigmoid( nlr ); unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - // Simulate1 + // With LeakyReLU/Sigmoid, case 1 Vector> simulations1; simulations1.append( Vector( simulationSize, 1 ) ); simulations1.append( Vector( simulationSize, 1 ) ); @@ -3482,15 +4036,17 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 2 ) ); + 0.7109, + 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 1 ) .get( i ), - 2 ) ); + 1.4602, + 0.0001 ) ); } - // Simulate2 + // With LeakyReLU/Sigmoid, case 2 Vector> simulations2; simulations2.append( Vector( simulationSize, 1 ) ); simulations2.append( Vector( simulationSize, 2 ) ); @@ -3503,12 +4059,98 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 4 ) ); + 0.4013, + 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 1 ) .get( i ), - 4 ) ); + 0.6508, + 0.0001 ) ); + } + } + + void test_simulate_softmax_and_max() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmaxAndMax( nlr ); + + unsigned simulationSize = Options::get()->getInt( 
Options::NUMBER_OF_SIMULATIONS ); + + // With LeakyReLU/Sigmoid, case 1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1 ) ); + simulations1.append( Vector( simulationSize, -3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -2.9998, + 0.0001 ) ); + } + + // With LeakyReLU/Sigmoid, case 2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, -3 ) ); + simulations2.append( Vector( simulationSize, 3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -1, + 0.0001 ) ); + } + } + + void test_simulate_relu_and_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithReluAndBilinear( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Relu/Bilinear, case 1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1 ) ); + simulations1.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + } + + // With ReLU/Bilinear, case 2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i 
), + 0 ) ); } } From 33f5d5f897dac7dfe4ab97980ebf77e7fc52a70b Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Tue, 1 Oct 2024 23:11:35 +0300 Subject: [PATCH 06/72] Add files via upload 0-3-0: Implementing interval arithmetic for all activation functions. --- src/nlr/Layer.cpp | 752 +++++++++++++++++++++++++++++++++++++++++++++- src/nlr/Layer.h | 82 ++++- 2 files changed, 809 insertions(+), 25 deletions(-) diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index f7dd40b563..73f787595f 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -724,8 +724,30 @@ void Layer::computeIntervalArithmeticBounds() case SIGN: computeIntervalArithmeticBoundsForSign(); break; - + + case ROUND: + computeIntervalArithmeticBoundsForRound(); + break; + + case LEAKY_RELU: + computeIntervalArithmeticBoundsForLeakyRelu(); + break; + + case SIGMOID: + computeIntervalArithmeticBoundsForSigmoid(); + break; + case MAX: + computeIntervalArithmeticBoundsForMax(); + break; + + case SOFTMAX: + computeIntervalArithmeticBoundsForSoftmax(); + break; + + case BILINEAR: + computeIntervalArithmeticBoundsForBilinear(); + break; default: printf( "Error! 
Activation type %u unsupported\n", _type ); @@ -779,13 +801,13 @@ void Layer::computeIntervalArithmeticBoundsForWeightedSum() if ( _eliminatedNeurons.exists( i ) ) continue; - if ( newLb[i] > _lb[i] ) + if ( _lb[i] < newLb[i] ) { _lb[i] = newLb[i]; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } - if ( newUb[i] < _ub[i] ) + if ( _ub[i] > newUb[i] ) { _ub[i] = newUb[i]; _layerOwner->receiveTighterBound( @@ -813,13 +835,13 @@ void Layer::computeIntervalArithmeticBoundsForRelu() if ( lb < 0 ) lb = 0; - if ( lb > _lb[i] ) + if ( _lb[i] < lb ) { _lb[i] = lb; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } - if ( ub < _ub[i] ) + if ( _ub[i] > ub ) { _ub[i] = ub; _layerOwner->receiveTighterBound( @@ -843,13 +865,13 @@ void Layer::computeIntervalArithmeticBoundsForAbs() if ( lb > 0 ) { - if ( lb > _lb[i] ) + if ( _lb[i] < lb ) { _lb[i] = lb; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } - if ( ub < _ub[i] ) + if ( _ub[i] > ub ) { _ub[i] = ub; _layerOwner->receiveTighterBound( @@ -858,13 +880,13 @@ void Layer::computeIntervalArithmeticBoundsForAbs() } else if ( ub < 0 ) { - if ( -ub > _lb[i] ) + if ( _lb[i] < -ub ) { _lb[i] = -ub; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } - if ( -lb < _ub[i] ) + if ( _ub[i] > -lb ) { _ub[i] = -lb; _layerOwner->receiveTighterBound( @@ -881,7 +903,7 @@ void Layer::computeIntervalArithmeticBoundsForAbs() Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } - if ( FloatUtils::max( ub, -lb ) < _ub[i] ) + if ( _ub[i] > FloatUtils::max( ub, -lb ) ) { _ub[i] = FloatUtils::max( ub, -lb ); _layerOwner->receiveTighterBound( @@ -903,21 +925,408 @@ void Layer::computeIntervalArithmeticBoundsForSign() double lb = sourceLayer->getLb( sourceIndex._neuron ); double ub = sourceLayer->getUb( sourceIndex._neuron ); + + double new_lb; + double 
new_ub; if ( !FloatUtils::isNegative( lb ) ) { - _lb[i] = 1; - _ub[i] = 1; + new_lb = 1; + new_ub = 1; } else if ( FloatUtils::isNegative( ub ) ) { - _lb[i] = -1; - _ub[i] = -1; + new_lb = -1; + new_ub = -1; } else { - _lb[i] = -1; - _ub[i] = 1; + new_lb = -1; + new_ub = 1; + } + + /* + We now have the tightest bounds we can for the relu + variable. If they are tigheter than what was previously + known, store them. + */ + if ( _lb[i] < new_lb ) + { + _lb[i] = new_lb; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > new_ub ) + { + _ub[i] = new_ub; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + +void Layer::computeIntervalArithmeticBoundsForLeakyRelu() +{ + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + + double lb = sourceLayer->getLb( sourceIndex._neuron ); + double ub = sourceLayer->getUb( sourceIndex._neuron ); + + if ( lb > 0 ) + { + if ( _lb[i] < lb ) + { + _lb[i] = lb; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + if ( _ub[i] > ub ) + { + _ub[i] = ub; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } + else if ( ub < 0 ) + { + if ( _lb[i] < _alpha * lb ) + { + _lb[i] = _alpha * lb; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + if ( _ub[i] > _alpha * ub ) + { + _ub[i] = _alpha * ub; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } + else + { + // lb < 0 < ub + if ( _lb[i] < _alpha * lb ) + { + _lb[i] = _alpha * lb; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], 
Tightening::LB ) ); + } + + if ( _ub[i] > ub ) + { + _ub[i] = ub; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } + } +} + +void Layer::computeIntervalArithmeticBoundsForSigmoid() +{ + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + + double lb = sourceLayer->getLb( sourceIndex._neuron ); + double ub = sourceLayer->getUb( sourceIndex._neuron ); + + double lbSigmoid = SigmoidConstraint::sigmoid( lb ); + double ubSigmoid = SigmoidConstraint::sigmoid( ub ); + + + if ( _lb[i] < lbSigmoid ) + { + _lb[i] = lbSigmoid; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + if ( _ub[i] > ubSigmoid ) + { + _ub[i] = ubSigmoid; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + + +void Layer::computeIntervalArithmeticBoundsForRound() +{ + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + + double lb = sourceLayer->getLb( sourceIndex._neuron ); + double ub = sourceLayer->getUb( sourceIndex._neuron ); + + double lbRound = FloatUtils::round( lb ); + double ubRound = FloatUtils::round( ub ); + + + if ( _lb[i] < lbRound ) + { + _lb[i] = lbRound; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + if ( _ub[i] > ubRound ) + { + _ub[i] = ubRound; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + + +void Layer::computeIntervalArithmeticBoundsForMax() +{ + for ( unsigned i = 0; i < _size; ++i ) + { + if ( 
_eliminatedNeurons.exists( i ) ) + continue; + + ASSERT( _neuronToActivationSources.exists( i ) ); + List sources = getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + + NeuronIndex indexOfMaxLowerBound = *( sources.begin() ); + double maxLowerBound = FloatUtils::negativeInfinity(); + double maxUpperBound = FloatUtils::negativeInfinity(); + + Map sourceLbs; + Map sourceUbs; + unsigned counter = 0; + for ( const auto &sourceIndex : sources ) + { + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + + sourceLbs[sourceIndex] = sourceLb; + sourceUbs[sourceIndex] = sourceUb; + + if ( maxLowerBound < sourceLb ) + { + indexOfMaxLowerBound = sourceIndex; + maxLowerBound = sourceLb; + } + if ( maxUpperBound < sourceUb ) + { + maxUpperBound = sourceUb; + } + ++counter; + } + + // The phase is fixed if the lower-bound of a source variable x_b is + // larger than the upper-bounds of the other source variables. 
+ bool phaseFixed = true; + for ( const auto &sourceIndex : sources ) + { + if ( sourceIndex != indexOfMaxLowerBound && + FloatUtils::gt( sourceUbs[sourceIndex], maxLowerBound ) ) + { + phaseFixed = false; + break; + } + } + + if ( phaseFixed ) + { + // Phase fixed + // Concrete bound: lb_b <= x_f <= ub_b + if ( _lb[i] < maxLowerBound ) + { + _lb[i] = maxLowerBound; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > sourceUbs[indexOfMaxLowerBound] ) + { + _ub[i] = sourceUbs[indexOfMaxLowerBound]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } + else + { + // MaxPool not fixed + // Concrete bounds: lb_b <= x_f <= maxUpperBound + if ( _lb[i] < maxLowerBound ) + { + _lb[i] = maxLowerBound; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > maxUpperBound ) + { + _ub[i] = maxUpperBound; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } + } +} + +void Layer::computeIntervalArithmeticBoundsForSoftmax() +{ + Set handledInputNeurons; + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + ASSERT( _neuronToActivationSources.exists( i ) ); + List sources = getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + + ASSERT( sourceLayer->getSize() == _size ); + + Vector sourceLbs; + Vector sourceUbs; + for ( const auto &sourceIndex : sources ) + { + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + + sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + } + + // Find the index of i in the softmax + 
unsigned index = 0; + for ( const auto &sourceIndex : sources ) + { + if ( handledInputNeurons.exists( sourceIndex._neuron ) ) + ++index; + else + { + handledInputNeurons.insert( sourceIndex._neuron ); + break; + } + } + + double lb = softmaxLinearLowerBound( sourceLbs, sourceUbs, index ); + double ub = softmaxLinearUpperBound( sourceLbs, sourceUbs, index ); + if ( _lb[i] < lb ) + { + _lb[i] = lb; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + if ( _ub[i] > ub ) + { + _ub[i] = ub; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + +void Layer::computeIntervalArithmeticBoundsForBilinear() +{ + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + ASSERT( _neuronToActivationSources.exists( i ) ); + List sources = getActivationSources( i ); + ASSERT( sources.size() == 2 ); + + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + + Vector sourceLbs; + Vector sourceUbs; + Vector sourceValues; + bool allConstant = true; + for ( const auto &sourceIndex : sources ) + { + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + + sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + + if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) + { + allConstant = false; + } + else + { + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + sourceValues.append( sourceValue ); + } + } + + if ( allConstant ) + { + // If the both source neurons have been eliminated, this neuron is constant + double value = sourceValues[0] * sourceValues[1]; + + if ( _lb[i] < value ) + { + _lb[i] = value; + _layerOwner->receiveTighterBound( + Tightening( 
_neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > value ) + { + _ub[i] = value; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + continue; + } + + double lb = FloatUtils::infinity(); + double ub = FloatUtils::negativeInfinity(); + List values = { sourceLbs[0] * sourceLbs[1], + sourceLbs[0] * sourceUbs[1], + sourceUbs[0] * sourceLbs[1], + sourceUbs[0] * sourceUbs[1] }; + for ( const auto &v : values ) + { + if ( v < lb ) + lb = v; + if ( v > ub ) + ub = v; + } + + + if ( _lb[i] < lb ) + { + _lb[i] = lb; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > ub ) + { + _ub[i] = ub; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); } } } @@ -1680,6 +2089,317 @@ void Layer::computeSymbolicBoundsForWeightedSum() } } +double Layer::softmaxLSELowerBound( const Vector &inputs, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) +{ + double sum = 0; + for ( unsigned j = 0; j < inputs.size(); ++j ) + { + double lj = inputLbs[j]; + double uj = inputUbs[j]; + double xj = inputs[j]; + sum += + ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); + } + + return std::exp( inputs[i] ) / sum; +} + +double Layer::softmaxdLSELowerBound( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) +{ + double val = 0; + if ( i == di ) + val += softmaxLSELowerBound( inputMids, inputLbs, inputUbs, i ); + + double ldi = inputLbs[di]; + double udi = inputUbs[di]; + + double sum = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + double lj = inputLbs[j]; + double uj = inputUbs[j]; + double xj = inputMids[j]; + + sum += + ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); + } + + val -= std::exp( inputMids[i] ) / ( sum * sum ) * ( std::exp( udi ) - 
std::exp( ldi ) ) / + ( udi - ldi ); + + return val; +} + +double Layer::softmaxLSELowerBound2( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) +{ + double max = FloatUtils::negativeInfinity(); + unsigned maxInputIndex = 0; + unsigned index = 0; + for ( const auto &mid : inputMids ) + { + if ( mid > max ) + { + max = mid; + maxInputIndex = index; + } + ++index; + } + + if ( maxInputIndex == i ) + return softmaxERLowerBound( inputMids, inputLbs, inputUbs, i ); + else + { + double sum = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + if ( j == maxInputIndex ) + sum += 1; + else + { + double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; + double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; + double xjjstar = inputMids[j] - inputMids[maxInputIndex]; + + sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + + ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); + } + } + + return std::exp( inputMids[i] - inputMids[maxInputIndex] ) / sum; + } +} + +double Layer::softmaxdLSELowerBound2( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) +{ + double max = FloatUtils::negativeInfinity(); + unsigned maxInputIndex = 0; + unsigned index = 0; + for ( const auto &mid : inputMids ) + { + if ( mid > max ) + { + max = mid; + maxInputIndex = index; + } + ++index; + } + + if ( maxInputIndex == i ) + return softmaxdERLowerBound( inputMids, inputLbs, inputUbs, i, di ); + else + { + double val = softmaxLSELowerBound2( inputMids, inputLbs, inputUbs, i ); + + double sum = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + if ( j == maxInputIndex ) + sum += 1; + else + { + double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; + double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; + double xjjstar = inputMids[j] - inputMids[maxInputIndex]; + sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + + 
( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); + } + } + double val2 = std::exp( inputMids[i] - inputMids[maxInputIndex] ) / ( sum * sum ); + + if ( i == di ) + { + double ldijstar = inputLbs[i] - inputUbs[maxInputIndex]; + double udijstar = inputUbs[i] - inputLbs[maxInputIndex]; + return val - + val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / ( udijstar - ldijstar ); + } + else if ( maxInputIndex == di ) + { + double sum2 = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + if ( j == maxInputIndex ) + continue; + else + { + double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; + double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; + sum2 += ( std::exp( ujjstar ) - std::exp( ljjstar ) ) / ( ujjstar - ljjstar ); + } + } + return -val + val2 * sum2; + } + else + { + double ldijstar = inputLbs[di] - inputUbs[maxInputIndex]; + double udijstar = inputUbs[di] - inputLbs[maxInputIndex]; + return -val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / + ( udijstar - ldijstar ); + } + } +} + +double Layer::softmaxLSEUpperBound( const Vector &inputs, + const Vector &outputLb, + const Vector &outputUb, + unsigned i ) +{ + double li = outputLb[i]; + double ui = outputUb[i]; + + Vector inputTilda; + SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); + + return ( ( li * std::log( ui ) - ui * std::log( li ) ) / ( std::log( ui ) - std::log( li ) ) - + ( ui - li ) / ( std::log( ui ) - std::log( li ) ) * + SoftmaxConstraint::logSumOfExponential( inputTilda ) ); +} + +double Layer::softmaxdLSEUpperbound( const Vector &inputMids, + const Vector &outputLb, + const Vector &outputUb, + unsigned i, + unsigned di ) +{ + double li = outputLb[i]; + double ui = outputUb[i]; + + double val = -( ui - li ) / ( std::log( ui ) - std::log( li ) ); + + double val2 = std::exp( inputMids[di] ) / SoftmaxConstraint::sumOfExponential( inputMids ); + if ( i == di ) + val2 -= 1; + + return val * val2; +} + +double Layer::softmaxERLowerBound( const 
Vector &inputs, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) +{ + Vector inputTilda; + SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); + + double sum = 0; + for ( unsigned j = 0; j < inputs.size(); ++j ) + { + if ( i == j ) + sum += 1; + else + { + double ljTilda = inputLbs[j] - inputUbs[i]; + double ujTilda = inputUbs[j] - inputLbs[i]; + double xjTilda = inputTilda[j]; + + sum += ( ujTilda - xjTilda ) / ( ujTilda - ljTilda ) * std::exp( ljTilda ) + + ( xjTilda - ljTilda ) / ( ujTilda - ljTilda ) * std::exp( ujTilda ); + } + } + + return 1 / sum; +} + +double Layer::softmaxdERLowerBound( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) +{ + double val = softmaxERLowerBound( inputMids, inputLbs, inputUbs, i ); + + if ( i != di ) + { + double ldiTilda = inputLbs[di] - inputUbs[i]; + double udiTilda = inputUbs[di] - inputLbs[i]; + return -val * val * ( std::exp( udiTilda ) - std::exp( ldiTilda ) ) / + ( udiTilda - ldiTilda ); + } + else + { + double val2 = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + if ( j != i ) + { + double ljTilda = inputLbs[j] - inputUbs[i]; + double ujTilda = inputUbs[j] - inputLbs[i]; + val2 += ( std::exp( ujTilda ) - std::exp( ljTilda ) ) / ( ujTilda - ljTilda ); + } + } + return val * val * val2; + } +} + +double Layer::softmaxERUpperBound( const Vector &inputs, + const Vector &outputLb, + const Vector &outputUb, + unsigned i ) +{ + double li = outputLb[i]; + double ui = outputUb[i]; + + Vector inputTilda; + SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); + + return ui + li - ui * li * SoftmaxConstraint::sumOfExponential( inputTilda ); +} + +double Layer::softmaxdERUpperBound( const Vector &inputMids, + const Vector &outputLb, + const Vector &outputUb, + unsigned i, + unsigned di ) +{ + double li = outputLb[i]; + double ui = outputUb[i]; + + + if ( i == di ) + { + double val2 = -1; + for ( unsigned j = 0; j < 
inputMids.size(); ++j ) + val2 += std::exp( inputMids[j] - inputMids[i] ); + return li * ui * val2; + } + else + return -li * ui * std::exp( inputMids[di] - inputMids[i] ); +} + +double Layer::softmaxLinearLowerBound( const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) +{ + Vector uTilda; + SoftmaxConstraint::xTilda( inputUbs, inputLbs[i], uTilda ); + uTilda[i] = 0; + return 1 / SoftmaxConstraint::sumOfExponential( uTilda ); +} + +double Layer::softmaxLinearUpperBound( const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) +{ + Vector lTilda; + SoftmaxConstraint::xTilda( inputLbs, inputUbs[i], lTilda ); + lTilda[i] = 0; + return 1 / SoftmaxConstraint::sumOfExponential( lTilda ); +} + void Layer::eliminateVariable( unsigned variable, double value ) { if ( !_variableToNeuron.exists( variable ) ) diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h index e52ea01038..ceb16b3d68 100644 --- a/src/nlr/Layer.h +++ b/src/nlr/Layer.h @@ -138,7 +138,7 @@ class Layer void obtainCurrentBounds(); void computeSymbolicBounds(); void computeIntervalArithmeticBounds(); - + /* Preprocessing functionality: variable elimination and reindexing */ @@ -207,16 +207,74 @@ class Layer void allocateMemory(); void freeMemoryIfNeeded(); - + /* - Helper functions for symbolic bound tightening + The following methods compute concrete softmax output bounds + using different linear approximation, as well as the coefficients + of softmax inputs in the symbolic bounds */ - void comptueSymbolicBoundsForInput(); - void computeSymbolicBoundsForRelu(); - void computeSymbolicBoundsForSign(); - void computeSymbolicBoundsForAbsoluteValue(); - void computeSymbolicBoundsForWeightedSum(); - void computeSymbolicBoundsDefault(); + double softmaxLSELowerBound( const Vector &inputs, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ); + + double softmaxdLSELowerBound( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ); + + 
double softmaxLSELowerBound2( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ); + + double softmaxdLSELowerBound2( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ); + + double softmaxLSEUpperBound( const Vector &inputs, + const Vector &outputLb, + const Vector &outputUb, + unsigned i ); + + double softmaxdLSEUpperbound( const Vector &inputMids, + const Vector &outputLb, + const Vector &outputUb, + unsigned i, + unsigned di ); + + double softmaxERLowerBound( const Vector &inputs, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ); + + double softmaxdERLowerBound( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ); + + double softmaxERUpperBound( const Vector &inputs, + const Vector &outputLb, + const Vector &outputUb, + unsigned i ); + + double softmaxdERUpperBound( const Vector &inputMids, + const Vector &outputLb, + const Vector &outputUb, + unsigned i, + unsigned di ); + + double softmaxLinearLowerBound( const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ); + + double softmaxLinearUpperBound( const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ); /* Helper functions for interval bound tightening @@ -225,6 +283,12 @@ class Layer void computeIntervalArithmeticBoundsForRelu(); void computeIntervalArithmeticBoundsForAbs(); void computeIntervalArithmeticBoundsForSign(); + void computeIntervalArithmeticBoundsForMax(); + void computeIntervalArithmeticBoundsForLeakyRelu(); + void computeIntervalArithmeticBoundsForSigmoid(); + void computeIntervalArithmeticBoundsForRound(); + void computeIntervalArithmeticBoundsForSoftmax(); + void computeIntervalArithmeticBoundsForBilinear(); const double *getSymbolicLb() const; const double *getSymbolicUb() const; From 6d1c898922aef6c34c6c4cb5e7d809b5a593ddfe Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Tue, 1 Oct 2024 23:13:47 +0300 
Subject: [PATCH 07/72] Add files via upload 0-3-1: Adding interval arithmetic tests for all activation functions. --- src/nlr/tests/Test_NetworkLevelReasoner.h | 3999 ++++++++++++++------- 1 file changed, 2775 insertions(+), 1224 deletions(-) diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index 96e698c7ac..33554ba680 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -1904,22 +1904,60 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - - void test_interval_arithmetic_bound_propagation_relu_constraints() + + void populateNetworkSBT( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; - populateNetwork( nlr ); + /* + 2 R 1 + x0 --- x2 ---> x4 --- x6 + \ / / + 1 \ / / + \/ -1 / + /\ / + 3 / \ / + / \ R / + x1 --- x3 ---> x5 + 1 + */ - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); - double large = 1000; + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 
); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 7 ); tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -1930,47 +1968,67 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + } + + void test_sbt_relus_all_active() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; nlr.setTableau( &tableau ); + populateNetworkSBT( nlr, tableau ); - // Initialize + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds( { - Tightening( 2, 
0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + x0: [4, 6] + x1: [1, 5] - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Layer 1: - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + Both ReLUs active, bound survive through activations: - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ + + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), } ); List bounds; @@ -1979,294 +2037,247 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); for ( const auto &bound : expectedBounds ) TS_ASSERT( bounds.exists( bound ) ); + } - // Change the current bounds - 
tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + void test_sbt_relus_active_and_inactive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBT( nlr, tableau ); - // Initialize + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + x0: [4, 6] + x1: [1, 5] - Tightening( 
4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Layer 1: - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + First ReLU is inactive, bounds get zeroed + Second ReLU is active, bounds surive the activation - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + x4.lb = 0 + x4.ub = 0 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT_EQUALS( expectedBounds2.size(), bounds.size() ); - for ( const auto &bound : expectedBounds2 ) - TS_ASSERT( bounds.exists( bound ) ); + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); } - void 
test_interval_arithmetic_bound_propagation_abs_constraints() + void test_sbt_relus_active_and_not_fixed() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBT( nlr, tableau ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); + /* + Input ranges: - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); + x0: [4, 6] + x1: [1, 5] - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); + Layer 1: - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 
1 ); + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] - // Layer dependenices - nlr.addLayerDependency( 0, 1 ); - nlr.addLayerDependency( 1, 2 ); - nlr.addLayerDependency( 2, 3 ); - nlr.addLayerDependency( 3, 4 ); - nlr.addLayerDependency( 4, 5 ); + First ReLU is undecided, bound is concretized. + Coefficient: 12/(12--4) = 12/16 = 0.75 + Second ReLU is active, bounds surive the activation - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + x4 range: [0, 12] + x4.lb = 0.75( 2x0 + 3x1 ) - 0.75 * 15 = 1.5x0 + 2.25x1 - 11.25 + x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + Layer 2: - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + x6.lb = 0.5x0 + 1.25x1 - 11.25 + x6.ub = 0.5x0 + 1.25x1 - 8.25 - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + x6 range: [2 + 1.25 - 11.25 = -8, 3 + 6.25 - 8.25 = 1] = [-8, 1] + */ - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), - // Initialize the bounds - 
tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 1, Tightening::UB ), + } ); - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - nlr.setTableau( &tableau ); + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); + } - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + void test_sbt_relus_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBT( nlr, tableau ); - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 
3, Tightening::UB ), + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); - Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 8, Tightening::UB ), + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. + nlr.setBias( 1, 0, -15 ); - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + // However, one of the ReLU's variables has been eliminated + nlr.eliminateVariable( 2, -3 ); - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - Tightening( 10, -3, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + /* + Input ranges: - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 41, Tightening::UB ), - } ); + x0: [4, 6] + x1: [1, 5] - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : expectedBounds ) - TS_ASSERT( bounds.exists( bound ) ); + Layer 1: - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 
4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + First ReLU is inactive (set externally), bounds get zeroed + Second ReLU is active, bounds surive the activation - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + x4.lb = 0 + x4.ub = 0 - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), + Layer 2: - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), + List expectedBounds( { + // x2 does not appear, because it has been eliminated - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), - 
Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT_EQUALS( expectedBounds2.size(), bounds.size() ); - for ( const auto &bound : expectedBounds2 ) - TS_ASSERT( bounds.exists( bound ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); } - void populateNetworkSBT( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + void test_sbt_abs_all_positive() { - /* - 2 R 1 - x0 --- x2 ---> x4 --- x6 - \ / / - 1 \ / / - \/ -1 / - /\ / - 3 / \ / - / \ R / - x1 --- x3 ---> x5 - 1 - */ + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); // Create the layers nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); // Mark layer dependencies @@ -2300,7 +2311,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Very loose bounds for neurons except inputs double large = 1000000; - tableau.getBoundManager().initialize( 7 ); tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -2311,16 +2321,6 @@ class 
NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - } - - void test_sbt_relus_all_active() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBT( nlr, tableau ); tableau.setLowerBound( 0, 4 ); tableau.setUpperBound( 0, 6 ); @@ -2345,7 +2345,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x3.lb = x0 + x1 : [5, 11] x3.ub = x0 + x1 : [5, 11] - Both ReLUs active, bound survive through activations: + Both absolute values positive, bound survive through activations: x4.lb = 2x0 + 3x1 : [11, 27] x4.ub = 2x0 + 3x1 : [11, 27] @@ -2378,18 +2378,66 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : expectedBounds ) - TS_ASSERT( bounds.exists( bound ) ); + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); } - void test_sbt_relus_active_and_inactive() + void test_sbt_abs_positive_and_negative() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); NLR::NetworkLevelReasoner nlr; MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); nlr.setTableau( &tableau ); - populateNetworkSBT( nlr, tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + 
nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); tableau.setLowerBound( 0, 4 ); tableau.setUpperBound( 0, 6 ); @@ -2417,19 +2465,19 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x3.lb = x0 + x1 : [5, 11] x3.ub = x0 + x1 : [5, 11] - First ReLU is inactive, bounds get zeroed - Second ReLU is active, bounds surive the activation + First absolute value is negative, bounds get flipped + Second absolute value is positive, bounds surive the activation - x4.lb = 0 - x4.ub = 0 + x4.lb = -2x0 -3x1 + 30 : [3, 19] + x4.ub = -2x0 -3x1 + 30 : [3, 19] x5.lb = x0 + x1 : [5, 11] x5.ub = x0 + x1 : [5, 11] Layer 2: - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 : [-11, -5] + x6.lb = - 3x0 - 4x1 + 30 : [-8, 14] + x6.ub = - 3x0 - 4x1 + 30 : [-8, 14] */ List expectedBounds( { @@ -2438,120 +2486,88 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 3, 5, Tightening::LB ), Tightening( 3, 11, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), - 
Tightening( 4, 0, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 19, Tightening::UB ), Tightening( 5, 5, Tightening::LB ), Tightening( 5, 11, Tightening::UB ), - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, -5, Tightening::UB ), + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 14, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : bounds ) TS_ASSERT( expectedBounds.exists( bound ) ); } - void test_sbt_relus_active_and_not_fixed() + void test_sbt_absolute_values_positive_and_not_fixed() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); NLR::NetworkLevelReasoner nlr; MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); nlr.setTableau( &tableau ); - populateNetworkSBT( nlr, tableau ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -15 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First ReLU is undecided, bound is concretized. 
- Coefficient: 12/(12--4) = 12/16 = 0.75 - Second ReLU is active, bounds surive the activation - - x4 range: [0, 12] - x4.lb = 0.75( 2x0 + 3x1 ) - 0.75 * 15 = 1.5x0 + 2.25x1 - 11.25 - x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - x6.lb = 0.5x0 + 1.25x1 - 11.25 - x6.ub = 0.5x0 + 1.25x1 - 8.25 + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); - x6 range: [2 + 1.25 - 11.25 = -8, 3 + 6.25 - 8.25 = 1] = [-8, 1] - */ + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); - List expectedBounds( { - Tightening( 2, -4, Tightening::LB ), - Tightening( 2, 12, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 12, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, 1, Tightening::UB ), - } ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - TS_ASSERT_EQUALS( 
expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - void test_sbt_relus_active_and_externally_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + // Very loose bounds for neurons except inputs + double large = 1000000; - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBT( nlr, tableau ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); tableau.setLowerBound( 0, 4 ); tableau.setUpperBound( 0, 6 ); tableau.setLowerBound( 1, 1 ); tableau.setUpperBound( 1, 5 ); - // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. + // Strong negative bias for x2, which is node (1,0) nlr.setBias( 1, 0, -15 ); - // However, one of the ReLU's variables has been eliminated - nlr.eliminateVariable( 2, -3 ); - // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); @@ -2570,34 +2586,37 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x3.lb = x0 + x1 : [5, 11] x3.ub = x0 + x1 : [5, 11] - First ReLU is inactive (set externally), bounds get zeroed + First absolute value is undecided, bounds are concretized. 
Second ReLU is active, bounds surive the activation + x4 range: [0, 12] x4.lb = 0 - x4.ub = 0 + x4.ub = 12 x5.lb = x0 + x1 : [5, 11] x5.ub = x0 + x1 : [5, 11] Layer 2: - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 : [-11, -5] + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 + 12 : [ 1, 7] + + x6 range: [-11, 7] */ List expectedBounds( { - // x2 does not appear, because it has been eliminated - + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), Tightening( 3, 5, Tightening::LB ), Tightening( 3, 11, Tightening::UB ), Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 0, Tightening::UB ), + Tightening( 4, 12, Tightening::UB ), Tightening( 5, 5, Tightening::LB ), Tightening( 5, 11, Tightening::UB ), Tightening( 6, -11, Tightening::LB ), - Tightening( 6, -5, Tightening::UB ), + Tightening( 6, 7, Tightening::UB ), } ); List bounds; @@ -2608,7 +2627,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( expectedBounds.exists( bound ) ); } - void test_sbt_abs_all_positive() + void test_sbt_absolute_values_active_and_externally_fixed() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); @@ -2670,6 +2689,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 1, 1 ); tableau.setUpperBound( 1, 5 ); + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
+ nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); @@ -2682,531 +2707,461 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Layer 1: - x2.lb = 2x0 + 3x1 : [11, 27] - x2.ub = 2x0 + 3x1 : [11, 27] + x2 is eliminated, everything set to -3 x3.lb = x0 + x1 : [5, 11] x3.ub = x0 + x1 : [5, 11] - Both absolute values positive, bound survive through activations: + Second absolute value is positive, bounds surive the activation - x4.lb = 2x0 + 3x1 : [11, 27] - x4.ub = 2x0 + 3x1 : [11, 27] + x4: all set to 3 x5.lb = x0 + x1 : [5, 11] x5.ub = x0 + x1 : [5, 11] Layer 2: - x6.lb = x0 + 2x1 : [6, 16] - x6.ub = x0 + 2x1 : [6, 16] + x6.lb = - x0 - x1 + 3 : [-8, -2] + x6.ub = - x0 - x1 + 3 : [-8, -2] */ List expectedBounds( { - Tightening( 2, 11, Tightening::LB ), - Tightening( 2, 27, Tightening::UB ), + // x2 does not appear, because it has been eliminated + Tightening( 3, 5, Tightening::LB ), Tightening( 3, 11, Tightening::UB ), - Tightening( 4, 11, Tightening::LB ), - Tightening( 4, 27, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), Tightening( 5, 5, Tightening::LB ), Tightening( 5, 11, Tightening::UB ), - Tightening( 6, 6, Tightening::LB ), - Tightening( 6, 16, Tightening::UB ), + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + printf( "Dumpign discovered bounds:\n" ); + for ( const auto &bound : bounds ) + bound.dump(); + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); for ( const auto &bound : bounds ) TS_ASSERT( expectedBounds.exists( bound ) ); } - void test_sbt_abs_positive_and_negative() + void test_generate_input_query() { - Options::get()->setString( 
Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); // Create the layers nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) + for ( unsigned i = 1; i <= 5; ++i ) nlr.addLayerDependency( i - 1, i ); - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - // Very loose bounds for neurons except inputs - double large = 1000000; + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, 
large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -30 ); + // Set the weights and biases for the weighted sum layers - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); - /* - Input ranges: + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -5 ); - x0: [4, 6] - x1: [1, 5] + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); - Layer 1: + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 1, 1, 0 ); + nlr.setBias( 1, 2, 0 ); - x2.lb = 2x0 + 3x1 - 30 : [-19, -3] - x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + nlr.setBias( 3, 0, 0 ); + nlr.setBias( 3, 1, 2 ); - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] + nlr.setBias( 5, 0, 0 ); + nlr.setBias( 5, 1, 0 ); - First absolute value is negative, bounds get flipped - Second absolute value is positive, bounds surive the activation + // Mark the ReLU/Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + 
nlr.addActivationSource( 1, 2, 2, 2 ); - x4.lb = -2x0 -3x1 + 30 : [3, 19] - x4.ub = -2x0 -3x1 + 30 : [3, 19] + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] + // Start the testing + Query ipq; + nlr.generateQuery( ipq ); + List unhandledEquations; + Set varsInUnhandledConstraints; + TS_ASSERT( + ipq.constructNetworkLevelReasoner( unhandledEquations, varsInUnhandledConstraints ) ); + NLR::NetworkLevelReasoner *reconstructedNlr = ipq.getNetworkLevelReasoner(); - Layer 2: + double input[2]; + double output[2]; - x6.lb = - 3x0 - 4x1 + 30 : [-8, 14] - x6.ub = - 3x0 - 4x1 + 30 : [-8, 14] - */ + input[0] = 1; + input[1] = 1; - List expectedBounds( { - Tightening( 2, -19, Tightening::LB ), - Tightening( 2, -3, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 19, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), + TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, 14, Tightening::UB ), - } ); + input[0] = 1; + input[1] = 2; - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); } - void test_sbt_absolute_values_positive_and_not_fixed() + void test_simulate_relu() { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - 
tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + populateNetwork( nlr ); - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); + // With ReLUs, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + // With ReLUs, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 1 ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); 
- nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 1 ) ); + } - // Very loose bounds for neurons except inputs - double large = 1000000; + // With ReLUs, case 1 and 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, 2 ) ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 0 ) ); + } + } - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -15 ); + void test_simulate_sigmoids() + { + NLR::NetworkLevelReasoner nlr; - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + populateNetworkWithSigmoids( nlr ); - /* - Input ranges: + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - x0: [4, 6] - x1: 
[1, 5] + // case 1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); - Layer 1: + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.6750, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 3.0167, + 0.0001 ) ); + } - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] + // case 2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 1 ) ); - First absolute value is undecided, bounds are concretized. - Second ReLU is active, bounds surive the activation + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - x4 range: [0, 12] - x4.lb = 0 - x4.ub = 12 + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.6032, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.5790, + 0.0001 ) ); + } - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] + // case 3 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, 2 ) ); - Layer 2: + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 + 12 : [ 1, 7] + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.5045, + 
0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.1957, + 0.0001 ) ); + } + } + + void test_simulate_round() + { + NLR::NetworkLevelReasoner nlr; - x6 range: [-11, 7] - */ + populateNetworkWithRound( nlr ); - List expectedBounds( { - Tightening( 2, -4, Tightening::LB ), - Tightening( 2, 12, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 12, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), + // With Round, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, 7, Tightening::UB ), - } ); + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } + // With Round, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 2.1 ) ); + simulations2.append( Vector( simulationSize, 1.4 ) ); - void test_sbt_absolute_values_active_and_externally_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - 
NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -4, + 0.0001 ) ); + } - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + // With Round, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 2.1 ) ); + simulations3.append( Vector( simulationSize, 1.6 ) ); - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -12, + 0.0001 ) ); + } + } + + void test_simulate_sign() + { + NLR::NetworkLevelReasoner nlr; - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); + populateNetworkWithSign( nlr ); - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + unsigned simulationSize = 
Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + // With Sign, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
- nlr.setBias( 1, 0, -15 ); - - // However, the weighted sum variable has been eliminated - nlr.eliminateVariable( 2, -3 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2 is eliminated, everything set to -3 - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - Second absolute value is positive, bounds surive the activation - - x4: all set to 3 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 + 3 : [-8, -2] - x6.ub = - x0 - x1 + 3 : [-8, -2] - */ - - List expectedBounds( { - // x2 does not appear, because it has been eliminated - - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, -2, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - printf( "Dumpign discovered bounds:\n" ); - for ( const auto &bound : bounds ) - bound.dump(); - - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } - - void test_generate_input_query() - { - NLR::NetworkLevelReasoner nlr; - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Variable indexing - - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - 
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - - // Set the weights and biases for the weighted sum layers - - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -5 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 1, 1, 0 ); - nlr.setBias( 1, 2, 0 ); - - nlr.setBias( 3, 0, 0 ); - nlr.setBias( 3, 1, 2 ); - - nlr.setBias( 5, 0, 0 ); - nlr.setBias( 5, 1, 0 ); - - // Mark the ReLU/Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Start the testing - Query ipq; - nlr.generateQuery( ipq ); - List unhandledEquations; - Set varsInUnhandledConstraints; - TS_ASSERT( - ipq.constructNetworkLevelReasoner( unhandledEquations, varsInUnhandledConstraints ) ); - NLR::NetworkLevelReasoner *reconstructedNlr = ipq.getNetworkLevelReasoner(); - - 
double input[2]; - double output[2]; - - input[0] = 1; - input[1] = 1; + // With Sign, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, -2 ) ); + simulations2.append( Vector( simulationSize, -2 ) ); - TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } - input[0] = 1; - input[1] = 2; + // With Sign, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, -1 ) ); + simulations3.append( Vector( simulationSize, 1 ) ); - TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -4 ) ); + } } - - void test_simulate_relu() + + void test_simulate_abs() { NLR::NetworkLevelReasoner nlr; - populateNetwork( nlr ); + populateNetworkWithAbs( nlr ); unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - // With ReLUs, Inputs are zeros, only biases count + // With Abs, Inputs are zeros, only biases count Vector> simulations1; simulations1.append( Vector( simulationSize, 0 ) 
); simulations1.append( Vector( simulationSize, 0 ) ); @@ -3227,10 +3182,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 4 ) ); } - // With ReLUs, case 1 + // With Abs, case 1 Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, -2 ) ); + simulations2.append( Vector( simulationSize, -2 ) ); TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); @@ -3245,10 +3200,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 1 ) .get( i ), - 1 ) ); + 4 ) ); } - // With ReLUs, case 1 and 2 + // With Abs, case 2 Vector> simulations3; simulations3.append( Vector( simulationSize, 1 ) ); simulations3.append( Vector( simulationSize, 2 ) ); @@ -3261,24 +3216,24 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 0 ) ); + 4 ) ); TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 1 ) .get( i ), - 0 ) ); + 10 ) ); } } - - void test_simulate_sigmoids() + + void test_simulate_leaky_relu() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithSigmoids( nlr ); + populateNetworkWithLeakyRelu( nlr ); unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - // case 1 + // With Leaky ReLU, Inputs are zeros, only biases count Vector> simulations1; simulations1.append( Vector( simulationSize, 0 ) ); simulations1.append( Vector( simulationSize, 0 ) ); @@ -3291,17 +3246,15 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 0.6750, - 0.0001 ) ); + 1 ) ); TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) 
.get( 1 ) .get( i ), - 3.0167, - 0.0001 ) ); + 4 ) ); } - // case 2 + // With Leaky ReLU, case 1 (alpha=0.1) Vector> simulations2; simulations2.append( Vector( simulationSize, 1 ) ); simulations2.append( Vector( simulationSize, 1 ) ); @@ -3314,17 +3267,17 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 0.6032, + 0.9, 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 1 ) .get( i ), - 2.5790, + 0.57, 0.0001 ) ); } - // case 3 + // With Leaky ReLU, case 2 Vector> simulations3; simulations3.append( Vector( simulationSize, 1 ) ); simulations3.append( Vector( simulationSize, 2 ) ); @@ -3337,26 +3290,26 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 0.5045, + -0.04, 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 1 ) .get( i ), - 2.1957, + -0.76, 0.0001 ) ); } } - void test_simulate_round() + void test_simulate_max() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithRound( nlr ); + populateNetworkWithMax( nlr ); unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - // With Round, Inputs are zeros, only biases count + // With Max, Inputs are zeros, only biases count Vector> simulations1; simulations1.append( Vector( simulationSize, 0 ) ); simulations1.append( Vector( simulationSize, 0 ) ); @@ -3369,18 +3322,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); + -3 ) ); } - // With Round, case 1 + // With Max, case 1 
Vector> simulations2; - simulations2.append( Vector( simulationSize, 2.1 ) ); - simulations2.append( Vector( simulationSize, 1.4 ) ); + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, -3 ) ); TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); @@ -3390,20 +3338,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 2, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -4, - 0.0001 ) ); + -18 ) ); } - // With Round, case 2 + // With Max, case 2 Vector> simulations3; - simulations3.append( Vector( simulationSize, 2.1 ) ); - simulations3.append( Vector( simulationSize, 1.6 ) ); + simulations3.append( Vector( simulationSize, -3 ) ); + simulations3.append( Vector( simulationSize, 3 ) ); TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); @@ -3413,26 +3354,19 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 0, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -12, - 0.0001 ) ); + -5 ) ); } } - void test_simulate_sign() + void test_simulate_softmax() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithSign( nlr ); + populateNetworkWithSoftmax( nlr ); unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - // With Sign, Inputs are zeros, only biases count + // With Softmax, Inputs are zeros, only biases count Vector> simulations1; simulations1.append( Vector( simulationSize, 0 ) ); simulations1.append( Vector( simulationSize, 0 ) ); @@ -3445,18 +3379,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 
)->getSimulations() ) ) .get( 0 ) .get( i ), - 1 ) ); + 0.2999, + 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 1 ) .get( i ), - 4 ) ); + 2.4001, + 0.0001 ) ); } - // With Sign, case 1 + // With Softmax, case 1 Vector> simulations2; - simulations2.append( Vector( simulationSize, -2 ) ); - simulations2.append( Vector( simulationSize, -2 ) ); + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, -3 ) ); TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); @@ -3466,18 +3402,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 1 ) ); + 0.1192, + 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 1 ) .get( i ), - 4 ) ); + 2.7615, + 0.0001 ) ); } - // With Sign, case 2 + // With Softmax, case 2 Vector> simulations3; - simulations3.append( Vector( simulationSize, -1 ) ); - simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, -3 ) ); + simulations3.append( Vector( simulationSize, 3 ) ); TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); @@ -3487,24 +3425,26 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - -1 ) ); + 0.1206, + 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 1 ) .get( i ), - -4 ) ); + 2.7588, + 0.0001 ) ); } } - void test_simulate_abs() + void test_simulate_bilinear() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithAbs( nlr ); + populateNetworkWithBilinear( nlr ); unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - // With Abs, Inputs are zeros, only biases count + // With 
Bilinear, Inputs are zeros, only biases count Vector> simulations1; simulations1.append( Vector( simulationSize, 0 ) ); simulations1.append( Vector( simulationSize, 0 ) ); @@ -3517,18 +3457,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); + 0 ) ); } - // With Abs, case 1 + // With Bilinear, case 1 Vector> simulations2; - simulations2.append( Vector( simulationSize, -2 ) ); - simulations2.append( Vector( simulationSize, -2 ) ); + simulations2.append( Vector( simulationSize, 0.1 ) ); + simulations2.append( Vector( simulationSize, -0.3 ) ); TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); @@ -3538,18 +3473,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); + 2.8304, + 0.0001 ) ); } - // With Abs, case 2 + // With Bilinear, case 2 Vector> simulations3; - simulations3.append( Vector( simulationSize, 1 ) ); - simulations3.append( Vector( simulationSize, 2 ) ); + simulations3.append( Vector( simulationSize, -0.3 ) ); + simulations3.append( Vector( simulationSize, 0.3 ) ); TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); @@ -3559,50 +3490,100 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 4 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 10 ) ); + 0.0912, + 0.0001 ) ); } } - - void test_simulate_leaky_relu() + + void 
test_simulate_non_consecutive_layers() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithLeakyRelu( nlr ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + nlr.addLayerDependency( 0, 1 ); + nlr.addLayerDependency( 1, 2 ); + nlr.addLayerDependency( 2, 3 ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 3, 4 ); + nlr.addLayerDependency( 0, 4 ); + nlr.addLayerDependency( 4, 5 ); + + // Set the weights and relus + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, 2 ); + nlr.setWeight( 2, 2, 3, 1, -2 ); + nlr.setWeight( 0, 1, 3, 1, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 0, 0, 4, 2 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 2, 5, 0, 1 ); unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - // With Leaky ReLU, Inputs are zeros, only biases count + // Simulate1 Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 1 ) ); + simulations1.append( Vector( simulationSize, 1 ) ); TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); for ( unsigned i = 0; i < simulationSize; ++i ) - { TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 1 ) ); + 2 ) ); + + // 
Simulate2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, -1 ) ); + simulations2.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) + .get( 0 ) .get( i ), - 4 ) ); - } + 0 ) ); + } - // With Leaky ReLU, case 1 (alpha=0.1) - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 1 ) ); + void test_simulate_abs_and_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbsAndRelu( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + // Simulate1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1 ) ); + simulations1.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); for ( unsigned i = 0; i < simulationSize; ++i ) { @@ -3610,22 +3591,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 0.9, - 0.0001 ) ); + 2 ) ); TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 1 ) .get( i ), - 0.57, - 0.0001 ) ); + 2 ) ); } - // With Leaky ReLU, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, 1 ) ); - simulations3.append( Vector( simulationSize, 2 ) ); + // Simulate2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 2 ) ); - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); for ( unsigned i = 0; i < simulationSize; ++i ) { @@ -3633,29 +3612,27 @@ class 
NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - -0.04, - 0.0001 ) ); + 4 ) ); TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 1 ) .get( i ), - -0.76, - 0.0001 ) ); + 4 ) ); } } - void test_simulate_max() + void test_simulate_round_and_sign() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithMax( nlr ); + populateNetworkWithRoundAndSign( nlr ); unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - // With Max, Inputs are zeros, only biases count + // With Round/Sign, case 1 Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 1.6 ) ); + simulations1.append( Vector( simulationSize, 1.4 ) ); TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); @@ -3665,13 +3642,18 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - -3 ) ); + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -2 ) ); } - // With Max, case 1 + // With Round/Sign, case 2 Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, -3 ) ); + simulations2.append( Vector( simulationSize, 1.6 ) ); + simulations2.append( Vector( simulationSize, 1.6 ) ); TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); @@ -3681,38 +3663,27 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - -18 ) ); - } - - // With Max, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, -3 ) ); - simulations3.append( 
Vector( simulationSize, 3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { + -1 ) ); TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) + .get( 1 ) .get( i ), - -5 ) ); + -4 ) ); } } - void test_simulate_softmax() + void test_simulate_leaky_relu_and_sigmoid() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithSoftmax( nlr ); + populateNetworkWithLeakyReluAndSigmoid( nlr ); unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - // With Softmax, Inputs are zeros, only biases count + // With LeakyReLU/Sigmoid, case 1 Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 1 ) ); + simulations1.append( Vector( simulationSize, 1 ) ); TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); @@ -3722,20 +3693,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 0.2999, + 0.7109, 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 1 ) .get( i ), - 2.4001, + 1.4602, 0.0001 ) ); } - // With Softmax, case 1 + // With LeakyReLU/Sigmoid, case 2 Vector> simulations2; simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, -3 ) ); + simulations2.append( Vector( simulationSize, 2 ) ); TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); @@ -3745,52 +3716,29 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 0.1192, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i 
), - 2.7615, - 0.0001 ) ); - } - - // With Softmax, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, -3 ) ); - simulations3.append( Vector( simulationSize, 3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.1206, + 0.4013, 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 1 ) .get( i ), - 2.7588, + 0.6508, 0.0001 ) ); } } - void test_simulate_bilinear() + void test_simulate_softmax_and_max() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithBilinear( nlr ); + populateNetworkWithSoftmaxAndMax( nlr ); unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - // With Bilinear, Inputs are zeros, only biases count + // With LeakyReLU/Sigmoid, case 1 Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 1 ) ); + simulations1.append( Vector( simulationSize, -3 ) ); TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); @@ -3800,13 +3748,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 0 ) ); + -2.9998, + 0.0001 ) ); } - // With Bilinear, case 1 + // With LeakyReLU/Sigmoid, case 2 Vector> simulations2; - simulations2.append( Vector( simulationSize, 0.1 ) ); - simulations2.append( Vector( simulationSize, -0.3 ) ); + simulations2.append( Vector( simulationSize, -3 ) ); + simulations2.append( Vector( simulationSize, 3 ) ); TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); @@ -3816,112 +3765,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( 
nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 2.8304, - 0.0001 ) ); - } - - // With Bilinear, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, -0.3 ) ); - simulations3.append( Vector( simulationSize, 0.3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.0912, + -1, 0.0001 ) ); } } - - void test_simulate_non_consecutive_layers() + + void test_simulate_relu_and_bilinear() { NLR::NetworkLevelReasoner nlr; - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - nlr.addLayerDependency( 0, 1 ); - nlr.addLayerDependency( 1, 2 ); - nlr.addLayerDependency( 2, 3 ); - nlr.addLayerDependency( 0, 3 ); - nlr.addLayerDependency( 3, 4 ); - nlr.addLayerDependency( 0, 4 ); - nlr.addLayerDependency( 4, 5 ); - - // Set the weights and relus - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, 2 ); - nlr.setWeight( 2, 2, 3, 1, -2 ); - nlr.setWeight( 0, 1, 3, 1, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 0, 0, 4, 2 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 2, 5, 0, 1 ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - 
// Simulate1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1 ) ); - simulations1.append( Vector( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 2 ) ); - - // Simulate2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, -1 ) ); - simulations2.append( Vector( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); - } + populateNetworkWithReluAndBilinear( nlr ); - void test_simulate_abs_and_relu() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithAbsAndRelu( nlr ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - // Simulate1 + // With Relu/Bilinear, case 1 Vector> simulations1; simulations1.append( Vector( simulationSize, 1 ) ); simulations1.append( Vector( simulationSize, 1 ) ); @@ -3934,15 +3791,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 2 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2 ) ); + 1 ) ); } - // Simulate2 + // With ReLU/Bilinear, case 2 Vector> simulations2; simulations2.append( Vector( simulationSize, 1 ) ); simulations2.append( Vector( simulationSize, 2 ) ); @@ -3955,203 +3807,1883 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) .get( 0 ) .get( i ), - 4 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( 
nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); + 0 ) ); } } - void test_simulate_round_and_sign() + void test_interval_arithmetic_bound_propagation_relu_constraints() { NLR::NetworkLevelReasoner nlr; + populateNetwork( nlr ); - populateNetworkWithRoundAndSign( nlr ); + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); - // With Round/Sign, case 1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1.6 ) ); - simulations1.append( Vector( simulationSize, 1.4 ) ); + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + nlr.setTableau( &tableau ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 
1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -2 ) ); - } + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large 
); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_abs_constraints() + { + NLR::NetworkLevelReasoner nlr; + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 
2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Layer dependenices + nlr.addLayerDependency( 0, 1 ); + nlr.addLayerDependency( 1, 2 ); + nlr.addLayerDependency( 2, 3 ); + nlr.addLayerDependency( 3, 4 ); + nlr.addLayerDependency( 4, 5 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); 
+ + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 8, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, 
Tightening::LB ), Tightening( 8, 11, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 41, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), 
Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_sign_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSign( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + 
tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, 3 ); + tableau.setUpperBound( 0, 4 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large 
); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, 4, Tightening::LB ), Tightening( 2, 5, Tightening::UB ), + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 11, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, 1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, 1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, 4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_leaky_relu_constraints() + { + 
NLR::NetworkLevelReasoner nlr; + populateNetworkWithLeakyRelu( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), + Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, 
Tightening::UB ), + Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), + + Tightening( 9, -0.28, Tightening::LB ), Tightening( 9, 10.1, Tightening::UB ), + Tightening( 11, -0.38, Tightening::LB ), Tightening( 11, 9.1, Tightening::UB ), + + Tightening( 12, -0.28, Tightening::LB ), Tightening( 12, 10.1, Tightening::UB ), + Tightening( 13, -1.42, Tightening::LB ), Tightening( 13, 37.4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, 
Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), + Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), + + Tightening( 9, -0.34, Tightening::LB ), Tightening( 9, 7.1, Tightening::UB ), + Tightening( 11, -0.32, Tightening::LB ), Tightening( 11, 7.3, Tightening::UB ), + + Tightening( 12, -0.34, Tightening::LB ), Tightening( 12, 7.1, Tightening::UB ), + Tightening( 13, -1.3, Tightening::LB ), Tightening( 13, 29, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_round_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithRound( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, 1.4 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, -1.4 ); + tableau.setUpperBound( 1, 2.1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, 
-large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), + Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), + Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), + + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), + Tightening( 11, -7, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), + Tightening( 13, -25, Tightening::LB ), Tightening( 13, 35, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3.1 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, 1.4 ); + tableau.setUpperBound( 1, 2.1 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); 
+ tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), + Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), + Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + + Tightening( 9, -16, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 11, -15, Tightening::LB ), Tightening( 11, 2, Tightening::UB ), + + Tightening( 12, -16, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -61, Tightening::LB ), Tightening( 13, 7, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void 
test_interval_arithmetic_bound_propagation_sigmoid_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSigmoids( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.5000, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), + Tightening( 5, 0.0067, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), + Tightening( 7, 0.2689, Tightening::LB ), Tightening( 
7, 0.7311, Tightening::UB ), + + Tightening( 8, -0.2244, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), + Tightening( 10, 0.3948, Tightening::LB ), Tightening( 10, 2.2244, Tightening::UB ), + + Tightening( 9, 0.4441, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), + Tightening( 11, 0.5974, Tightening::LB ), Tightening( 11, 0.9024, Tightening::UB ), + + Tightening( 12, 0.4441, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ), + Tightening( 13, 2.2364, Tightening::LB ), Tightening( 13, 3.5399, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, 
-2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.1192, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), + Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), + + Tightening( 8, -0.7616, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), + Tightening( 10, 0.2384, Tightening::LB ), Tightening( 10, 2.6052, Tightening::UB ), + + Tightening( 9, 0.3183, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), + Tightening( 11, 0.5593, Tightening::LB ), Tightening( 11, 0.9312, Tightening::UB ), + + Tightening( 12, 0.3183, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ), + Tightening( 13, 1.9963, Tightening::LB ), Tightening( 13, 3.6263, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_max_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithMax( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, 
-large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 10, Tightening::UB ), + Tightening( 9, -8, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), + + Tightening( 11, -10, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 
7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -8, Tightening::LB ), Tightening( 3, 9, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 9, Tightening::UB ), + Tightening( 7, -5, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 16, Tightening::UB ), + Tightening( 9, -14, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -5, Tightening::LB ), Tightening( 10, 16, Tightening::UB ), + + Tightening( 11, -16, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_softmax_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSoftmax( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + 
tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), + Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), + + Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), + Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), + + Tightening( 9, 0.0240, Tightening::LB ), Tightening( 9, 0.8349, Tightening::UB ), + Tightening( 11, 0.1651, Tightening::LB ), Tightening( 11, 0.9759, Tightening::UB ), + + Tightening( 12, 0.0240, Tightening::LB ), Tightening( 12, 0.8349, Tightening::UB ), + Tightening( 13, 0.5192, Tightening::LB ), 
Tightening( 13, 3.7629, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), 
Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ), + + Tightening( 9, 0.0184, Tightening::LB ), Tightening( 9, 0.8678, Tightening::UB ), + Tightening( 11, 0.1322, Tightening::LB ), Tightening( 11, 0.9816, Tightening::UB ), + + Tightening( 12, 0.0184, Tightening::LB ), Tightening( 12, 0.8678, Tightening::UB ), + Tightening( 13, 0.4151, Tightening::LB ), Tightening( 13, 3.8125, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_bilinear_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithBilinear( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -0.1 ); + tableau.setUpperBound( 0, 0.1 ); + tableau.setLowerBound( 1, -0.1 ); + tableau.setUpperBound( 1, 0.1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + 
+ List expectedBounds( { + Tightening( 2, 0.9, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ), + Tightening( 3, -0.5, Tightening::LB ), Tightening( 3, 0.5, Tightening::UB ), + Tightening( 4, -0.3, Tightening::LB ), Tightening( 4, 0.3, Tightening::UB ), + Tightening( 5, -0.3, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), + + Tightening( 6, -0.55, Tightening::LB ), Tightening( 6, 0.55, Tightening::UB ), + Tightening( 7, -0.09, Tightening::LB ), Tightening( 7, 0.09, Tightening::UB ), + + Tightening( 8, 1.36, Tightening::LB ), Tightening( 8, 2.64, Tightening::UB ), + Tightening( 9, -0.64, Tightening::LB ), Tightening( 9, 0.64, Tightening::UB ), + + Tightening( 10, -1.6896, Tightening::LB ), Tightening( 10, 1.6896, Tightening::UB ), + + Tightening( 11, -1.6896, Tightening::LB ), Tightening( 11, 1.6896, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -0.3 ); + tableau.setUpperBound( 0, 0.1 ); + tableau.setLowerBound( 1, -0.1 ); + tableau.setUpperBound( 1, 0.2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() 
); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, 0.7, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ), + Tightening( 3, -0.8, Tightening::LB ), Tightening( 3, 0.9, Tightening::UB ), + Tightening( 4, -0.5, Tightening::LB ), Tightening( 4, 0.5, Tightening::UB ), + Tightening( 5, -0.6, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), + + Tightening( 6, -0.88, Tightening::LB ), Tightening( 6, 0.99, Tightening::UB ), + Tightening( 7, -0.3, Tightening::LB ), Tightening( 7, 0.3, Tightening::UB ), + + Tightening( 8, 0.82, Tightening::LB ), Tightening( 8, 3.29, Tightening::UB ), + Tightening( 9, -1.29, Tightening::LB ), Tightening( 9, 1.18, Tightening::UB ), + + Tightening( 10, -4.2441, Tightening::LB ), Tightening( 10, 3.8822, Tightening::UB ), + + Tightening( 11, -3.8822, Tightening::LB ), Tightening( 11, 4.2441, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_abs_and_relu_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithAbsAndRelu( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + 
tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -5, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); 
+ tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), + + Tightening( 10, -10, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds 
) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_round_and_sign_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithRoundAndSign( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, 1.4 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, -1.4 ); + tableau.setUpperBound( 1, 2.1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), + Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), 
Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), + Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), + + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3.1 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, 1.4 ); + tableau.setUpperBound( 1, 2.1 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + 
List expectedBounds2( { + Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), + Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), + Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_leaky_relu_and_sigmoid_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithLeakyReluAndSigmoid( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + 
tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), + Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), + Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), + + Tightening( 9, 0.0573, Tightening::LB ), Tightening( 9, 0.9999, Tightening::UB ), + Tightening( 11, 0.0219, Tightening::LB ), Tightening( 11, 0.9999, Tightening::UB ), + + Tightening( 12, 0.0573, Tightening::LB ), Tightening( 12, 0.9999, Tightening::UB ), + Tightening( 13, 0.1229, Tightening::LB ), Tightening( 13, 3.9996, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + 
tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), + Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), + + Tightening( 9, 0.0323, Tightening::LB ), Tightening( 9, 0.9992, Tightening::UB ), + Tightening( 11, 0.0392, Tightening::LB ), Tightening( 11, 0.9993, Tightening::UB ), + + Tightening( 12, 0.0323, Tightening::LB ), Tightening( 12, 0.9992, Tightening::UB ), + 
Tightening( 13, 0.1498, Tightening::LB ), Tightening( 13, 3.9972, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_softmax_and_max_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSoftmaxAndMax( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); - // With Round/Sign, case 2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1.6 ) ); - simulations2.append( Vector( simulationSize, 1.6 ) ); + nlr.setTableau( &tableau ); - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( 
nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -4 ) ); - } - } - - void test_simulate_leaky_relu_and_sigmoid() - { - NLR::NetworkLevelReasoner nlr; + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - populateNetworkWithLeakyReluAndSigmoid( nlr ); + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), + Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), + + Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), + Tightening( 9, 0.3192, Tightening::LB ), Tightening( 9, 2.9819, Tightening::UB ), + + Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), + + Tightening( 11, -2.9819, Tightening::LB ), Tightening( 11, -0.3192, Tightening::UB ) + } ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - // With LeakyReLU/Sigmoid, case 1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1 ) ); - simulations1.append( Vector( simulationSize, 1 ) ); + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large 
); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.7109, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 1.4602, - 0.0001 ) ); - } + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - // With LeakyReLU/Sigmoid, case 2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 2 ) ); + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 9, 0.0654, Tightening::LB ), Tightening( 9, 
2.9933, Tightening::UB ), + + Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ), + + Tightening( 11, -2.9933, Tightening::LB ), Tightening( 11, -0.0654, Tightening::UB ) + } ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.4013, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 0.6508, - 0.0001 ) ); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - void test_simulate_softmax_and_max() + void test_interval_arithmetic_bound_propagation_relu_and_bilinear_constraints() { NLR::NetworkLevelReasoner nlr; + populateNetworkWithReluAndBilinear( nlr ); - populateNetworkWithSoftmaxAndMax( nlr ); + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); - // With LeakyReLU/Sigmoid, case 1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1 ) ); - simulations1.append( Vector( simulationSize, -3 ) ); + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + 
tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + nlr.setTableau( &tableau ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -2.9998, - 0.0001 ) ); - } + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - // With LeakyReLU/Sigmoid, case 2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, -3 ) ); - simulations2.append( Vector( simulationSize, 3 ) ); + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -7, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), + + Tightening( 11, -49, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + } ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -1, - 0.0001 ) ); - } - } - - void test_simulate_relu_and_bilinear() - { - 
NLR::NetworkLevelReasoner nlr; + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - populateNetworkWithReluAndBilinear( nlr ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); - // With Relu/Bilinear, case 1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1 ) ); - simulations1.append( Vector( simulationSize, 1 ) ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - } + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - // With ReLU/Bilinear, case 2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 2 ) ); + List 
expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); - } + Tightening( 10, -14, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), + + Tightening( 11, -49, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } void test_concretize_input_assignment() @@ -4277,4 +5809,23 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite for ( const auto &bound : expectedBounds ) TS_ASSERT( bounds.exists( bound ) ); } + + bool boundsEqual( const List &bounds, const List &expectedBounds ) + { + if ( bounds.size() != expectedBounds.size() ) + return false; + + bool allFound = true; + for ( const auto &bound : bounds ) + { + bool currentFound = false; + for ( const auto &expectedBound : expectedBounds ) + { + currentFound |= ( bound._type == expectedBound._type && bound._variable == expectedBound._variable && + FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) ); + } + allFound &= currentFound; + } + return allFound; + } }; From ee5dd0065088ea472a93876f479a6ca12f9eaf02 Mon Sep 17 00:00:00 2001 From: 
ido-shm-uel Date: Tue, 1 Oct 2024 23:16:12 +0300 Subject: [PATCH 08/72] 0-4-0: Implementing symbolic bound tightening for every activation function. 0-4-0: Implementing symbolic bound tightening for every activation function. --- src/nlr/Layer.cpp | 1316 ++++++++++++++++++++++++++++++++++++++++++++- src/nlr/Layer.h | 18 +- 2 files changed, 1316 insertions(+), 18 deletions(-) diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index 73f787595f..c7f728e860 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -2,7 +2,7 @@ /*! \file Layer.cpp ** \verbatim ** Top contributors (to current version): - ** Guy Katz + ** Guy Katz, Ido Shmuel ** This file is part of the Marabou project. ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS ** in the top-level source directory) and their institutional affiliations. @@ -1336,7 +1336,7 @@ void Layer::computeSymbolicBounds() switch ( _type ) { case INPUT: - comptueSymbolicBoundsForInput(); + computeSymbolicBoundsForInput(); break; case WEIGHTED_SUM: @@ -1354,7 +1354,31 @@ void Layer::computeSymbolicBounds() case ABSOLUTE_VALUE: computeSymbolicBoundsForAbsoluteValue(); break; - + + case LEAKY_RELU: + computeSymbolicBoundsForLeakyRelu(); + break; + + case ROUND: + computeSymbolicBoundsForRound(); + break; + + case SIGMOID: + computeSymbolicBoundsForSigmoid(); + break; + + case MAX: + computeSymbolicBoundsForMax(); + break; + + case SOFTMAX: + computeSymbolicBoundsForSoftmax(); + break; + + case BILINEAR: + computeSymbolicBoundsForBilinear(); + break; + default: computeSymbolicBoundsDefault(); break; @@ -1396,7 +1420,7 @@ void Layer::computeSymbolicBoundsDefault() } } -void Layer::comptueSymbolicBoundsForInput() +void Layer::computeSymbolicBoundsForInput() { std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); @@ -1532,19 +1556,19 @@ void Layer::computeSymbolicBoundsForRelu() { for ( unsigned j = 0; j < _inputLayerSize; ++j ) _symbolicLb[j * _size + i] = 
0; - + _symbolicLowerBias[i] = 0; } else - { + { for ( unsigned j = 0; j < _inputLayerSize; ++j ) _symbolicLb[j * _size + i] = _symbolicLb[j * _size + i] * _symbolicUbOfLb[i] / ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] ); - + _symbolicLowerBias[i] = _symbolicLowerBias[i] * _symbolicUbOfLb[i] / ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] ); } - + _symbolicLbOfLb[i] = 0; } else @@ -1661,6 +1685,8 @@ void Layer::computeSymbolicBoundsForSign() _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); + + std::cout << "sign, p-ll, ul, lu, uu: " << _symbolicLbOfLb[i] << " " << _symbolicUbOfLb[i] << " " << _symbolicLbOfUb[i] << " " << _symbolicUbOfUb[i] << " " << std::endl; // Has the b variable been fixed? if ( !FloatUtils::isNegative( sourceLb ) ) @@ -1674,6 +1700,9 @@ void Layer::computeSymbolicBoundsForSign() if ( signPhase == PHASE_NOT_FIXED ) { + PhaseStatus upperSignPhase = PHASE_NOT_FIXED; + PhaseStatus lowerSignPhase = PHASE_NOT_FIXED; + // If we got here, we know that lbLb < 0 and ubUb // > 0 @@ -1687,9 +1716,8 @@ void Layer::computeSymbolicBoundsForSign() _symbolicUb[j * _size + i] = 0; _symbolicUpperBias[i] = 1; - - _symbolicUbOfUb[i] = 1; - _symbolicLbOfUb[i] = 1; + + upperSignPhase = SIGN_PHASE_POSITIVE; } else { @@ -1704,9 +1732,6 @@ void Layer::computeSymbolicBoundsForSign() // Do the same for the bias, and then adjust _symbolicUpperBias[i] *= factor; _symbolicUpperBias[i] += 1; - - _symbolicUbOfUb[i] = 1; - _symbolicLbOfUb[i] = -1; } // Lower bound @@ -1720,8 +1745,7 @@ void Layer::computeSymbolicBoundsForSign() _symbolicLowerBias[i] = -1; - _symbolicUbOfLb[i] = -1; - _symbolicLbOfLb[i] = -1; + lowerSignPhase = SIGN_PHASE_NEGATIVE; } else { @@ -1730,15 +1754,38 @@ void Layer::computeSymbolicBoundsForSign() double factor = 2.0 / _symbolicUbOfUb[i]; for ( unsigned j = 0; j < _inputLayerSize; 
++j ) + { _symbolicLb[j * _size + i] *= factor; + std::cout << "sign, p-lb: " << _symbolicLb[j * _size + i] << std::endl; + } // Do the same for the bias, and then adjust _symbolicLowerBias[i] *= factor; _symbolicLowerBias[i] -= 1; - + std::cout << "sign, p-lB: " << _symbolicLowerBias[i] << std::endl; + } + + if (upperSignPhase == PHASE_NOT_FIXED) + { + _symbolicUbOfUb[i] = 1; + _symbolicLbOfUb[i] = -1; + } + else + { + _symbolicUbOfUb[i] = 1; + _symbolicLbOfUb[i] = 1; + } + + if (lowerSignPhase == PHASE_NOT_FIXED) + { _symbolicUbOfLb[i] = 1; _symbolicLbOfLb[i] = -1; } + else + { + _symbolicUbOfLb[i] = -1; + _symbolicLbOfLb[i] = -1; + } } else { @@ -1918,6 +1965,1241 @@ void Layer::computeSymbolicBoundsForAbsoluteValue() } } +void Layer::computeSymbolicBoundsForLeakyRelu() +{ + ASSERT( _alpha > 0 && _alpha < 1 ); + + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + { + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; + + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; + } + } + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + /* + There are two ways we can determine that a LeakyReLU has become fixed: + + 1. If the LeakyReLU's variable has been externally fixed + 2. lbLb >= 0 (ACTIVE) or ubUb <= 0 (INACTIVE) + */ + PhaseStatus leakyReluPhase = PHASE_NOT_FIXED; + + // Has the f variable been eliminated or fixed? 
+ if ( FloatUtils::isPositive( _lb[i] ) ) + leakyReluPhase = RELU_PHASE_ACTIVE; + else if ( FloatUtils::isZero( _ub[i] ) ) + leakyReluPhase = RELU_PHASE_INACTIVE; + + ASSERT( _neuronToActivationSources.exists( i ) ); + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + + /* + A LeakyReLU initially "inherits" the symbolic bounds computed + for its input variable + */ + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; + _symbolicUb[j * _size + i] = + sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; + } + _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; + _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; + + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + + _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); + _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); + _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); + _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); + + // Has the b variable been fixed? 
+ if ( !FloatUtils::isNegative( sourceLb ) ) + { + leakyReluPhase = RELU_PHASE_ACTIVE; + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + leakyReluPhase = RELU_PHASE_INACTIVE; + } + + if ( leakyReluPhase == PHASE_NOT_FIXED ) + { + // LeakyReLU not fixed + // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) + // Concrete upper bound: x_f <= ub_b + double width = sourceUb - sourceLb; + double coeff = ( sourceUb - _alpha * sourceLb ) / width; + + if ( _alpha <= 1 ) + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] *= coeff; + } + + // Do the same for the bias, and then adjust + _symbolicUpperBias[i] *= coeff; + _symbolicUpperBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; + + + // For the lower bound, in general, x_f >= lambda * x_b, where + // 0 <= lambda <= 1, would be a sound lower bound. We + // use the heuristic described in section 4.1 of + // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + // to set the value of lambda (either 0 or 1 is considered). 
+ if ( sourceUb > sourceLb ) + { + // lambda = 1 + // Symbolic lower bound: x_f >= x_b + // Concrete lower bound: x_f >= sourceLb + + // Lower bounds are passed as is + } + else + { + // lambda = 1 + // Symbolic lower bound: x_f >= _alpha x_b + // Concrete lower bound: x_f >= 0 + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] *= _alpha; + } + + _symbolicLowerBias[i] *= _alpha; + } + } + else + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] *= coeff; + } + + // Do the same for the bias, and then adjust + _symbolicLowerBias[i] *= coeff; + _symbolicLowerBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; + + if ( sourceUb > sourceLb ) + { + // Upper bounds are passed as is + } + else + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] *= _alpha; + } + + _symbolicUpperBias[i] *= _alpha; + } + } + + /* + We now have the symbolic representation for the current + layer. Next, we compute new lower and upper bounds for + it. For each of these bounds, we compute an upper bound and + a lower bound. 
+ */ + _symbolicLbOfLb[i] = _symbolicLowerBias[i]; + _symbolicUbOfLb[i] = _symbolicLowerBias[i]; + _symbolicLbOfUb[i] = _symbolicUpperBias[i]; + _symbolicUbOfUb[i] = _symbolicUpperBias[i]; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + double inputLb = _layerOwner->getLayer( 0 )->getLb( j ); + double inputUb = _layerOwner->getLayer( 0 )->getUb( j ); + + double entry = _symbolicLb[j * _size + i]; + + if ( entry >= 0 ) + { + _symbolicLbOfLb[i] += ( entry * inputLb ); + _symbolicUbOfLb[i] += ( entry * inputUb ); + } + else + { + _symbolicLbOfLb[i] += ( entry * inputUb ); + _symbolicUbOfLb[i] += ( entry * inputLb ); + } + + entry = _symbolicUb[j * _size + i]; + + if ( entry >= 0 ) + { + _symbolicLbOfUb[i] += ( entry * inputLb ); + _symbolicUbOfUb[i] += ( entry * inputUb ); + } + else + { + _symbolicLbOfUb[i] += ( entry * inputUb ); + _symbolicUbOfUb[i] += ( entry * inputLb ); + } + } + } + else + { + // The phase of this LeakyReLU is fixed! + if ( leakyReluPhase == RELU_PHASE_ACTIVE ) + { + // Positive LeakyReLU, bounds are propagated as is + } + else + { + // Negative LeakyReLU, bounds are multiplied by _alpha + _symbolicLbOfLb[i] *= _alpha; + _symbolicUbOfLb[i] *= _alpha; + _symbolicLbOfUb[i] *= _alpha; + _symbolicUbOfUb[i] *= _alpha; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] *= _alpha; + _symbolicLb[j * _size + i] *= _alpha; + } + + _symbolicLowerBias[i] *= _alpha; + _symbolicUpperBias[i] *= _alpha; + } + } + + if ( _symbolicUbOfUb[i] > sourceUb ) + _symbolicUbOfUb[i] = sourceUb; + if ( _symbolicLbOfLb[i] < _alpha * sourceLb ) + _symbolicLbOfLb[i] = _alpha * sourceLb; + + /* + We now have the tightest bounds we can for the leakyRelu + variable. If they are tigheter than what was previously + known, store them. 
+ */ + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + + +void Layer::computeSymbolicBoundsForSigmoid() +{ + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + { + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; + + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; + } + } + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + ASSERT( _neuronToActivationSources.exists( i ) ); + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + + /* + A Sigmoid initially "inherits" the symbolic bounds computed + for its input variable + */ + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; + _symbolicUb[j * _size + i] = + sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; + } + _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; + _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; + + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = 
sourceLayer->getUb( sourceIndex._neuron ); + + _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); + _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); + _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); + _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); + + // Bounds of lb, ub are the Sigmoids of source lb, ub + double sourceUbSigmoid = SigmoidConstraint::sigmoid( sourceUb ); + double sourceLbSigmoid = SigmoidConstraint::sigmoid( sourceLb ); + + // Case when the Sigmoid constraint is fixed + if ( FloatUtils::areEqual( FloatUtils::round( sourceUb ), FloatUtils::round( sourceLb ) ) ) + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] = 0; + _symbolicUb[j * _size + i] = 0; + } + + _symbolicLbOfUb[i] = sourceUbSigmoid; + _symbolicUbOfUb[i] = sourceUbSigmoid; + _symbolicLbOfLb[i] = sourceLbSigmoid; + _symbolicUbOfLb[i] = sourceLbSigmoid; + + _symbolicUpperBias[i] = sourceUbSigmoid; + _symbolicLowerBias[i] = sourceLbSigmoid; + } + + // Sigmoid not fixed + else + { + double lambda = ( _ub[i] - _lb[i] ) / ( sourceUb - sourceLb ); + double lambdaPrime = std::min( SigmoidConstraint::sigmoidDerivative( sourceLb ), + SigmoidConstraint::sigmoidDerivative( sourceUb ) ); + + // update lower bound + if ( FloatUtils::isPositive( sourceLb ) ) + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] *= lambda; + } + + // Do the same for the bias, and then adjust + _symbolicLowerBias[i] *= lambda; + _symbolicLowerBias[i] += sourceLbSigmoid - lambda * sourceLb; + } + else + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] *= lambdaPrime; + } + + // Do the same for the bias, and then adjust + _symbolicLowerBias[i] *= lambdaPrime; + _symbolicLowerBias[i] += sourceLbSigmoid - lambdaPrime * sourceLb; + } + + // update upper bound + if ( !FloatUtils::isPositive( sourceUb ) ) + { + for ( 
unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] *= lambda; + } + + // Do the same for the bias, and then adjust + _symbolicUpperBias[i] *= lambda; + _symbolicUpperBias[i] += sourceUbSigmoid - lambda * sourceUb; + } + else + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] *= lambdaPrime; + } + + // Do the same for the bias, and then adjust + _symbolicUpperBias[i] *= lambdaPrime; + _symbolicUpperBias[i] += sourceUbSigmoid - lambdaPrime * sourceUb; + } + + + /* + We now have the symbolic representation for the current + layer. Next, we compute new lower and upper bounds for + it. For each of these bounds, we compute an upper bound and + a lower bound. + */ + _symbolicLbOfLb[i] = _symbolicLowerBias[i]; + _symbolicUbOfLb[i] = _symbolicLowerBias[i]; + _symbolicLbOfUb[i] = _symbolicUpperBias[i]; + _symbolicUbOfUb[i] = _symbolicUpperBias[i]; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + double inputLb = _layerOwner->getLayer( 0 )->getLb( j ); + double inputUb = _layerOwner->getLayer( 0 )->getUb( j ); + + double entry = _symbolicLb[j * _size + i]; + + if ( entry >= 0 ) + { + _symbolicLbOfLb[i] += ( entry * inputLb ); + _symbolicUbOfLb[i] += ( entry * inputUb ); + } + else + { + _symbolicLbOfLb[i] += ( entry * inputUb ); + _symbolicUbOfLb[i] += ( entry * inputLb ); + } + + entry = _symbolicUb[j * _size + i]; + + if ( entry >= 0 ) + { + _symbolicLbOfUb[i] += ( entry * inputLb ); + _symbolicUbOfUb[i] += ( entry * inputUb ); + } + else + { + _symbolicLbOfUb[i] += ( entry * inputUb ); + _symbolicUbOfUb[i] += ( entry * inputLb ); + } + } + } + + if ( _symbolicLbOfLb[i] < -1 ) + _symbolicLbOfLb[i] = -1; + if ( _symbolicUbOfUb[i] > 1 ) + _symbolicUbOfUb[i] = 1; + + /* + We now have the tightest bounds we can for the relu + variable. If they are tigheter than what was previously + known, store them. 
+ */ + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + +void Layer::computeSymbolicBoundsForRound() +{ + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + { + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; + + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; + } + } + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + ASSERT( _neuronToActivationSources.exists( i ) ); + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + + /* + A Round initially "inherits" the symbolic bounds computed + for its input variable + */ + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; + _symbolicUb[j * _size + i] = + sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; + } + _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; + _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; + + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = 
sourceLayer->getUb( sourceIndex._neuron ); + + _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); + _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); + _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); + _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); + + + // Bounds of lb, ub are the rounded values of source lb, ub + double sourceUbRound = FloatUtils::round( sourceUb ); + double sourceLbRound = FloatUtils::round( sourceLb ); + + _symbolicLbOfUb[i] = sourceUbRound; + _symbolicUbOfUb[i] = sourceUbRound; + _symbolicLbOfLb[i] = sourceLbRound; + _symbolicUbOfLb[i] = sourceLbRound; + + + // Case when the Round constraint is fixed + if ( FloatUtils::areEqual( FloatUtils::round( sourceUb ), FloatUtils::round( sourceLb ) ) ) + { + _symbolicUb[i] = 0; + _symbolicUpperBias[i] = sourceUbRound; + + _symbolicLb[i] = 0; + _symbolicLowerBias[i] = sourceLbRound; + } + + // Round not fixed + else + { + // Symbolic upper bound: x_f <= x_b + 0.5 + // Concrete upper bound: x_f <= round(ub_b) + + _symbolicUpperBias[i] += 0.5; + + // Symbolic lower bound: x_f >= x_b - 0.5 + // Concrete lower bound: x_f >= round(lb_b) + + _symbolicLowerBias[i] -= 0.5; + } + + /* + We now have the tightest bounds we can for the relu + variable. If they are tigheter than what was previously + known, store them. 
+ */ + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + +void Layer::computeSymbolicBoundsForMax() +{ + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + { + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; + + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; + } + } + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + ASSERT( _neuronToActivationSources.exists( i ) ); + List sources = getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); + + NeuronIndex indexOfMaxLowerBound = *( sources.begin() ); + double maxLowerBound = FloatUtils::negativeInfinity(); + double maxUpperBound = FloatUtils::negativeInfinity(); + + Map sourceLbs; + Map sourceUbs; + for ( const auto &sourceIndex : sources ) + { + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + + sourceLbs[sourceIndex] = sourceLb; + sourceUbs[sourceIndex] = sourceUb; + + if ( maxLowerBound < sourceLb ) + { + indexOfMaxLowerBound = sourceIndex; + maxLowerBound = sourceLb; + } + if ( maxUpperBound 
< sourceUb ) + { + maxUpperBound = sourceUb; + } + } + + // The phase is fixed if the lower-bound of a source variable x_b is + // larger than the upper-bounds of the other source variables. + bool phaseFixed = true; + for ( const auto &sourceIndex : sources ) + { + if ( sourceIndex != indexOfMaxLowerBound && + FloatUtils::gt( sourceUbs[sourceIndex], maxLowerBound ) ) + { + phaseFixed = false; + break; + } + } + + if ( phaseFixed ) + { + // Phase fixed + // Symbolic bound: x_b <= x_f <= x_b + // Concrete bound: lb_b <= x_f <= ub_b + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + indexOfMaxLowerBound._neuron]; + _symbolicUb[j * _size + i] = + sourceSymbolicUb[j * sourceLayerSize + indexOfMaxLowerBound._neuron]; + } + _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[indexOfMaxLowerBound._neuron]; + _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[indexOfMaxLowerBound._neuron]; + + + _symbolicLbOfLb[i] = maxLowerBound; + _symbolicUbOfLb[i] = maxLowerBound; + _symbolicLbOfUb[i] = sourceUbs[indexOfMaxLowerBound]; + _symbolicUbOfUb[i] = sourceUbs[indexOfMaxLowerBound]; + } + else + { + // MaxPool not fixed + // Symbolic bounds: x_b <= x_f <= maxUpperBound + // Concrete bounds: lb_b <= x_f <= maxUpperBound + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + indexOfMaxLowerBound._neuron]; + _symbolicUb[j * _size + i] = 0; + } + _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[indexOfMaxLowerBound._neuron]; + _symbolicUpperBias[i] = maxUpperBound; + + _symbolicLbOfLb[i] = maxLowerBound; + _symbolicUbOfLb[i] = maxLowerBound; + _symbolicLbOfUb[i] = maxUpperBound; + _symbolicUbOfUb[i] = maxUpperBound; + } + + /* + We now have the tightest bounds we can for the relu + variable. If they are tigheter than what was previously + known, store them. 
+ */ + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + +void Layer::computeSymbolicBoundsForSoftmax() +{ + std::fill_n( _symbolicLowerBias, _size, 0 ); + std::fill_n( _symbolicUpperBias, _size, 0 ); + + double *symbolicLb = new double[_size * _size]; + double *symbolicUb = new double[_size * _size]; + std::fill_n( symbolicLb, _size * _size, 0 ); + std::fill_n( symbolicUb, _size * _size, 0 ); + + double *_work = new double[_size * _size]; + std::fill_n( _work, _size * _size, 0 ); + + Set handledInputNeurons; + unsigned sourceLayerSize = _size; + SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType(); + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + { + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; + + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; + } + } + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + ASSERT( _neuronToActivationSources.exists( i ) ); + List sources = getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + + sourceLayerSize = sourceLayer->getSize(); + ASSERT( sourceLayerSize == _size ); + + Vector sourceLbs; + Vector sourceUbs; + Vector sourceMids; + Vector targetLbs; + Vector targetUbs; + unsigned len = 0; + for ( const auto &sourceIndex : sources ) + { + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( 
sourceNeuron ); + + sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceMids.append( ( sourceLb + sourceUb ) / 2 ); + targetLbs.append( _lb[i] ); + targetUbs.append( _ub[i] ); + + ++len; + } + + // Find the index of i in the softmax + unsigned index = 0; + for ( const auto &sourceIndex : sources ) + { + if ( handledInputNeurons.exists( sourceIndex._neuron ) ) + ++index; + else + { + handledInputNeurons.insert( sourceIndex._neuron ); + break; + } + } + + double lb = softmaxLinearLowerBound( sourceLbs, sourceUbs, index ); + double ub = softmaxLinearUpperBound( sourceLbs, sourceUbs, index ); + if ( _lb[i] < lb ) + { + _lb[i] = lb; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + if ( _ub[i] > ub ) + { + _ub[i] = ub; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + targetLbs[index] = _lb[i]; + targetUbs[index] = _ub[i]; + + if ( FloatUtils::areEqual( _lb[i], _ub[i] ) ) + { + _symbolicLowerBias[i] = _lb[i]; + _symbolicUpperBias[i] = _ub[i]; + for ( const auto &sourceIndex : sources ) + { + symbolicLb[len * sourceIndex._neuron + i] = 0; + symbolicUb[len * sourceIndex._neuron + i] = 0; + } + } + else + { + // Compute symbolic bound + if ( boundType == SoftmaxBoundType::LOG_SUM_EXP_DECOMPOSITION ) + { + bool useLSE2 = false; + for ( const auto &lb : targetLbs ) + { + if ( lb > GlobalConfiguration::SOFTMAX_LSE2_THRESHOLD ) + useLSE2 = true; + } + unsigned inputIndex = 0; + if ( !useLSE2 ) + { + _symbolicLowerBias[i] = + softmaxLSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); + for ( const auto &sourceIndex : sources ) + { + double dldj = + softmaxdLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + symbolicLb[len * sourceIndex._neuron + i] = dldj; + _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; + 
++inputIndex; + } + } + else + { + _symbolicLowerBias[i] = + softmaxLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); + for ( const auto &sourceIndex : sources ) + { + double dldj = + softmaxdLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + symbolicLb[len * sourceIndex._neuron + i] = dldj; + _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; + ++inputIndex; + } + } + + _symbolicUpperBias[i] = softmaxLSEUpperBound( sourceMids, targetLbs, targetUbs, index ); + inputIndex = 0; + for ( const auto &sourceIndex : sources ) + { + double dudj = + softmaxdLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + symbolicUb[len * sourceIndex._neuron + i] = dudj; + _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; + ++inputIndex; + } + } + else if ( boundType == SoftmaxBoundType::EXPONENTIAL_RECIPROCAL_DECOMPOSITION ) + { + _symbolicLowerBias[i] = softmaxERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); + unsigned inputIndex = 0; + for ( const auto &sourceIndex : sources ) + { + double dldj = + softmaxdERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + symbolicLb[len * sourceIndex._neuron + i] = dldj; + _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; + ++inputIndex; + } + + _symbolicUpperBias[i] = softmaxERUpperBound( sourceMids, targetLbs, targetUbs, index ); + inputIndex = 0; + for ( const auto &sourceIndex : sources ) + { + double dudj = + softmaxdERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + symbolicUb[len * sourceIndex._neuron + i] = dudj; + _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; + ++inputIndex; + } + } + } + } + + for ( const auto &sourceLayerEntry : _sourceLayers ) + { + const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerEntry.first ); + + /* + Perform the multiplication + + newUB = oldUB * posWeights + oldLB * negWeights + newLB = oldUB * negWeights + oldLB * posWeights + */ + + for ( unsigned i = 0; i < sourceLayerSize * 
_size; ++i ) + { + if ( symbolicLb[i] > 0 ) + _work[i] = symbolicLb[i]; + else + _work[i] = 0; + } + // _work is now positive weights in symbolicLb + matrixMultiplication( sourceLayer->getSymbolicLb(), + _work, + _symbolicLb, + _inputLayerSize, + sourceLayerSize, + _size ); + if ( sourceLayer->getSymbolicLowerBias() ) + matrixMultiplication( + sourceLayer->getSymbolicLowerBias(), _work, _symbolicLowerBias, 1, sourceLayerSize, _size ); + + for ( unsigned i = 0; i < sourceLayerSize * _size; ++i ) + { + if ( symbolicLb[i] < 0 ) + _work[i] = symbolicLb[i]; + else + _work[i] = 0; + } + // _work is now negative weights in symbolicLb + matrixMultiplication( sourceLayer->getSymbolicUb(), + _work, + _symbolicLb, + _inputLayerSize, + sourceLayerSize, + _size ); + if ( sourceLayer->getSymbolicLowerBias() ) + matrixMultiplication( + sourceLayer->getSymbolicUpperBias(), _work, _symbolicLowerBias, 1, sourceLayerSize, _size ); + + for ( unsigned i = 0; i < sourceLayerSize * _size; ++i ) + { + if ( symbolicUb[i] > 0 ) + _work[i] = symbolicUb[i]; + else + _work[i] = 0; + } + // _work is now positive weights in symbolicUb + matrixMultiplication( sourceLayer->getSymbolicUb(), + _work, + _symbolicUb, + _inputLayerSize, + sourceLayerSize, + _size ); + if ( sourceLayer->getSymbolicUpperBias() ) + matrixMultiplication( + sourceLayer->getSymbolicUpperBias(), _work, _symbolicUpperBias, 1, sourceLayerSize, _size ); + + for ( unsigned i = 0; i < sourceLayerSize * _size; ++i ) + { + if ( symbolicUb[i] < 0 ) + _work[i] = symbolicUb[i]; + else + _work[i] = 0; + } + // _work is now negative weights in symbolicUb + matrixMultiplication( sourceLayer->getSymbolicLb(), + _work, + _symbolicUb, + _inputLayerSize, + sourceLayerSize, + _size ); + if ( sourceLayer->getSymbolicUpperBias() ) + matrixMultiplication( + sourceLayer->getSymbolicLowerBias(), _work, _symbolicUpperBias, 1, sourceLayerSize, _size ); + } + + /* + We now have the symbolic representation for the current + layer. 
Next, we compute new lower and upper bounds for + it. For each of these bounds, we compute an upper bound and + a lower bound. + */ + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + _symbolicLbOfLb[i] = _symbolicLowerBias[i]; + _symbolicUbOfLb[i] = _symbolicLowerBias[i]; + _symbolicLbOfUb[i] = _symbolicUpperBias[i]; + _symbolicUbOfUb[i] = _symbolicUpperBias[i]; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + double inputLb = _layerOwner->getLayer( 0 )->getLb( j ); + double inputUb = _layerOwner->getLayer( 0 )->getUb( j ); + + double entry = _symbolicLb[j * _size + i]; + + if ( entry >= 0 ) + { + _symbolicLbOfLb[i] += ( entry * inputLb ); + _symbolicUbOfLb[i] += ( entry * inputUb ); + } + else + { + _symbolicLbOfLb[i] += ( entry * inputUb ); + _symbolicUbOfLb[i] += ( entry * inputLb ); + } + + entry = _symbolicUb[j * _size + i]; + + if ( entry >= 0 ) + { + _symbolicLbOfUb[i] += ( entry * inputLb ); + _symbolicUbOfUb[i] += ( entry * inputUb ); + } + else + { + _symbolicLbOfUb[i] += ( entry * inputUb ); + _symbolicUbOfUb[i] += ( entry * inputLb ); + } + } + } + + if ( symbolicLb ) + { + delete[] symbolicLb; + symbolicLb = NULL; + } + if ( symbolicUb ) + { + delete[] symbolicUb; + symbolicUb = NULL; + } + if ( _work ) + { + delete[] _work; + _work = NULL; + } +} + +void Layer::computeSymbolicBoundsForBilinear() +{ + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + { + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; + + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; + } + } + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + 
ASSERT( _neuronToActivationSources.exists( i ) ); + List sources = getActivationSources( i ); + ASSERT( sources.size() == 2 ); + + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); + + Vector sourceLbs; + Vector sourceUbs; + Vector sourceValues; + bool allConstant = true; + unsigned indexA = 0; + unsigned indexB = 0; + unsigned counter = 0; + for ( const auto &sourceIndex : sources ) + { + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + + sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + + if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) + { + allConstant = false; + } + else + { + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + sourceValues.append( sourceValue ); + } + + if ( counter == 0 ) + { + indexA = sourceIndex._neuron; + } + else + { + indexB = sourceIndex._neuron; + } + ++counter; + } + + if ( allConstant ) + { + // If the both source neurons have been eliminated, this neuron is constant + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] = 0; + _symbolicLb[j * _size + i] = 0; + } + + _symbolicUpperBias[i] = sourceValues[0] * sourceValues[1]; + _symbolicLowerBias[i] = sourceValues[0] * sourceValues[1]; + continue; + } + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] = 0; + _symbolicLb[j * _size + i] = 0; + } + + // Symbolic lower bound: + // out >= alpha * x + beta * y + gamma + // where alpha = lb_y, beta = lb_x, gamma = -lb_x * lb_y + + // Symbolic upper bound: + // out <= alpha * x + beta * y + gamma + // where 
alpha = ub_y, beta = lb_x, gamma = -lb_x * ub_y
+        for ( unsigned j = 0; j < _inputLayerSize; ++j )
+        {
+            if (sourceLbs[1] >= 0)
+            {
+                _symbolicLb[j * _size + i] += sourceLbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA];
+            }
+            else
+            {
+                _symbolicLb[j * _size + i] += sourceLbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA];
+            }
+
+            if (sourceUbs[1] >= 0)
+            {
+                _symbolicUb[j * _size + i] += sourceUbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA];
+            }
+            else
+            {
+                _symbolicUb[j * _size + i] += sourceUbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA];
+            }
+
+            if (sourceLbs[0] >= 0)
+            {
+                _symbolicLb[j * _size + i] += sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB];
+                _symbolicUb[j * _size + i] += sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB];
+            }
+            else
+            {
+                _symbolicLb[j * _size + i] += sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB];
+                _symbolicUb[j * _size + i] += sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB];
+            }
+        }
+        _symbolicLowerBias[i] = -sourceLbs[0] * sourceLbs[1];
+        _symbolicUpperBias[i] = -sourceLbs[0] * sourceUbs[1];
+
+        double lb = FloatUtils::infinity();
+        double ub = FloatUtils::negativeInfinity();
+        List values = { sourceLbs[0] * sourceLbs[1],
+                        sourceLbs[0] * sourceUbs[1],
+                        sourceUbs[0] * sourceLbs[1],
+                        sourceUbs[0] * sourceUbs[1] };
+        for ( const auto &v : values )
+        {
+            if ( v < lb )
+                lb = v;
+            if ( v > ub )
+                ub = v;
+        }
+
+        /*
+          We now have the symbolic representation for the current
+          layer. Next, we compute new lower and upper bounds for
+          it. For each of these bounds, we compute an upper bound and
+          a lower bound.
+ */ + _symbolicLbOfLb[i] = _symbolicLowerBias[i]; + _symbolicUbOfLb[i] = _symbolicLowerBias[i]; + _symbolicLbOfUb[i] = _symbolicUpperBias[i]; + _symbolicUbOfUb[i] = _symbolicUpperBias[i]; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + double inputLb = _layerOwner->getLayer( 0 )->getLb( j ); + double inputUb = _layerOwner->getLayer( 0 )->getUb( j ); + + double entry = _symbolicLb[j * _size + i]; + + if ( entry >= 0 ) + { + _symbolicLbOfLb[i] += ( entry * inputLb ); + _symbolicUbOfLb[i] += ( entry * inputUb ); + } + else + { + _symbolicLbOfLb[i] += ( entry * inputUb ); + _symbolicUbOfLb[i] += ( entry * inputLb ); + } + + entry = _symbolicUb[j * _size + i]; + + if ( entry >= 0 ) + { + _symbolicLbOfUb[i] += ( entry * inputLb ); + _symbolicUbOfUb[i] += ( entry * inputUb ); + } + else + { + _symbolicLbOfUb[i] += ( entry * inputUb ); + _symbolicUbOfUb[i] += ( entry * inputLb ); + } + } + + /* + We now have the tightest bounds we can for the relu + variable. If they are tigheter than what was previously + known, store them. + */ + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + void Layer::computeSymbolicBoundsForWeightedSum() { std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h index ceb16b3d68..4d1990ebc5 100644 --- a/src/nlr/Layer.h +++ b/src/nlr/Layer.h @@ -2,7 +2,7 @@ /*! \file Layer.h ** \verbatim ** Top contributors (to current version): - ** Guy Katz + ** Guy Katz, Ido Shmuel ** This file is part of the Marabou project. ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS ** in the top-level source directory) and their institutional affiliations. 
@@ -275,6 +275,22 @@ class Layer double softmaxLinearUpperBound( const Vector &inputLbs, const Vector &inputUbs, unsigned i ); + + /* + Helper functions for symbolic bound tightening + */ + void computeSymbolicBoundsForInput(); + void computeSymbolicBoundsForRelu(); + void computeSymbolicBoundsForSign(); + void computeSymbolicBoundsForAbsoluteValue(); + void computeSymbolicBoundsForWeightedSum(); + void computeSymbolicBoundsForMax(); + void computeSymbolicBoundsForLeakyRelu(); + void computeSymbolicBoundsForSigmoid(); + void computeSymbolicBoundsForRound(); + void computeSymbolicBoundsForSoftmax(); + void computeSymbolicBoundsForBilinear(); + void computeSymbolicBoundsDefault(); /* Helper functions for interval bound tightening From 85fe8f3d7ca687b578c42f68255dfa7cfa13bea9 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Tue, 1 Oct 2024 23:18:01 +0300 Subject: [PATCH 09/72] 0-4-1: Adding symbolic bound tightening tests for all activation functions. 0-4-1: Adding symbolic bound tightening tests for all activation functions. --- src/nlr/tests/Test_NetworkLevelReasoner.h | 6622 +++++++++++++-------- 1 file changed, 4267 insertions(+), 2355 deletions(-) diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index 33554ba680..42de1e97fa 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -2,7 +2,7 @@ /*! \file Test_NetworkLevelReasoner.h ** \verbatim ** Top contributors (to current version): - ** Guy Katz, Andrew Wu + ** Guy Katz, Andrew Wu, Ido Shmuel ** This file is part of the Marabou project. ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS ** in the top-level source directory) and their institutional affiliations. 
@@ -17,6 +17,7 @@ #include "FloatUtils.h" #include "Layer.h" #include "NetworkLevelReasoner.h" +#include "DeepPolySoftmaxElement.h" #include "Options.h" #include "Query.h" #include "Tightening.h" @@ -991,7 +992,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); } - void populateNetworkWithReluAndBilinear( NLR::NetworkLevelReasoner &nlr ) { /* @@ -1059,1743 +1059,1769 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); } - - void test_evaluate_relu() + + void populateNetworkSBTRelu( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; - - populateNetwork( nlr ); + /* + 2 R 1 + x0 --- x2 ---> x4 --- x6 + \ / / + 1 \ / / + \/ -1 / + /\ / + 3 / \ / + / \ R / + x1 --- x3 ---> x5 + 1 + */ - double input[2]; - double output[2]; + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - // With ReLUs, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); - // With ReLUs, case 1 - input[0] = 1; - input[1] = 1; + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - 
TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 1 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - // With ReLUs, case 2 - input[0] = 1; - input[1] = 2; + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 0 ) ); + tableau.getBoundManager().initialize( 7 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); } - - void test_evaluate_sigmoids() + + void populateNetworkSBTReluResidual1( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; + /* + -1 + __________________ + / \ + / 1 R -1 1 R 3 1 + x0 --- x1 ---> x2 --- x3 ---> x4 --- x5 + \ / + \ 3 / + \________________________/ - populateNetworkWithSigmoids( nlr ); + */ - double input[2]; - double output[2]; + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 1 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 2, NLR::Layer::RELU, 1 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 4, NLR::Layer::RELU, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - // case 1 - input[0] = 0; - input[1] = 0; + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i 
); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 1, 5 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 4, 0, 5, 0, 3 ); + nlr.setWeight( 0, 0, 3, 0, -1 ); + nlr.setWeight( 1, 0, 5, 0, 3 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.6750, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 3.0167, 0.0001 ) ); - // case 2 - input[0] = 1; - input[1] = 1; + nlr.setBias( 3, 0, 1 ); + nlr.setBias( 5, 0, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.6032, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.5790, 0.0001 ) ); + nlr.addActivationSource( 3, 0, 4, 0 ); - // case 3 - input[0] = 1; - input[1] = 2; + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 5 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT( FloatUtils::areEqual( output[0], 0.5045, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.1957, 0.0001 ) ); + tableau.getBoundManager().initialize( 6 ); + tableau.setLowerBound( 1, -large ); + tableau.setUpperBound( 1, large ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); } - void 
test_evaluate_abs() + void populateNetworkSBTReluResidual2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithAbs( nlr ); + /* + -1 + __________________ + / \ + / 1 R -1 1 R 3 1 1 + x0 --- x1 ---> x2 --- x3 ---> x4 --- x5 --- x6 + \ / + \ 1 / + \_______________________________/ - double input[2]; - double output[2]; + */ - // With Abs, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 1 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 2, NLR::Layer::RELU, 1 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 4, NLR::Layer::RELU, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 6, NLR::Layer::WEIGHTED_SUM, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 6; ++i ) + nlr.addLayerDependency( i - 1, i ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 0, 5 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 4, 0, 5, 0, 3 ); + nlr.setWeight( 0, 0, 3, 0, -1 ); + nlr.setWeight( 0, 0, 5, 0, 1 ); + nlr.setWeight( 5, 0, 6, 0, 1 ); - // With Abs, case 1 - input[0] = -2; - input[1] = -2; + nlr.setBias( 3, 0, 1 ); + nlr.setBias( 5, 0, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + nlr.addActivationSource( 3, 0, 4, 0 ); - // With Abs, case 2 - input[0] = 1; - input[1] = 2; + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 1 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 6 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 10 ) ); + tableau.getBoundManager().initialize( 7 ); + tableau.setLowerBound( 1, -large ); + tableau.setUpperBound( 1, large ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); } - void test_evaluate_sign() + void populateNetworkSBTReluReindex( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; + /* - populateNetworkWithSign( nlr ); + 1 1 1 1 + x0 --- x2 x5 --- x6 x9 --- x10 + \ /\ /\ / \ / \ / + 1 \ / R\ /-1\ / R \ / 1 \ / + \/ \/ \/ \/ \/ + /\ /\ /\ /\ /\ + 1 / \ R/ \ 1/ \ R / \ 1 / \ + / \/ \/ \ / \ / 0 \ + x1 --- x3 x4 --- x7 x8 --- x11 + -1 1 + + The example described in Fig. 
3 of + https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + */ - double input[2]; - double output[2]; + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - // With Sign, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); - // With Sign, case 1 - input[0] = -2; - input[1] = -2; + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 0 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setBias( 5, 0, 1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 0 ); - // With Sign, case 2 (0 considered "non-negative", sign(0)=1) - input[0] = -1; - input[1] = 1; + nlr.addActivationSource( 3, 0, 4, 1 ); + nlr.addActivationSource( 3, 1, 4, 0 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - TS_ASSERT( 
FloatUtils::areEqual( output[0], -1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -4 ) ); - } - - void test_evaluate_round() - { - NLR::NetworkLevelReasoner nlr; + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - populateNetworkWithRound( nlr ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - double input[2]; - double output[2]; + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - // With Round, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 8 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - // With Round, case 1 - input[0] = 2.1; - input[1] = 1.4; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 2, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); - - // With Round, case 2 - input[0] = 2.1; - input[1] = 1.6; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -12, 0.0001 ) ); + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 
6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); } - void test_evaluate_leaky_relu() + void populateNetworkSBTLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithLeakyRelu( nlr ); - - double input[2]; - double output[2]; - - // With Leaky ReLU, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + /* - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + 1 LR 1 LR 1 1 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 1 \ / 0 \ / + \/ \/ \/ + /\ /\ /\ + 1 / \ 1 / \ 1 / \ + / \ LR / \ LR / 1 \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + -1 -1 + + The example described in Fig. 
3 of + https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + using LeakyReLU activation instead of ReLU + */ - // With Leaky ReLU, case 1 (alpha=0.1) - input[0] = 1; - input[1] = 1; + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.getLayer( 2 )->setAlpha( 0.2 ); + nlr.getLayer( 4 )->setAlpha( 0.2 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.9, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 0.57, 0.0001 ) ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); - // With Leaky ReLU, case 2 - input[0] = 1; - input[1] = 2; + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, 1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], -0.04, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -0.76, 0.0001 ) ); - } - - void test_evaluate_max() - { - NLR::NetworkLevelReasoner nlr; + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 0 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); - populateNetworkWithMax( nlr ); + nlr.setBias( 5, 0, 1 ); - double input[2]; - double output[1]; + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); - // With Max, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + nlr.addActivationSource( 3, 0, 4, 0 ); + 
nlr.addActivationSource( 3, 1, 4, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], -3 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - // With Max, case 1 - input[0] = 1; - input[1] = -3; + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - TS_ASSERT( FloatUtils::areEqual( output[0], -18 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - // With Max, case 2 - input[0] = -3; - input[1] = 3; + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT( FloatUtils::areEqual( output[0], -5 ) ); + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + 
tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); } - - void test_evaluate_softmax() + void populateNetworkSBTSigmoidsAndRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSoftmax( nlr ); + /* - double input[2]; - double output[2]; + 1 S 1 Rd + x0 --- x2 ---> x4 --- x6 --- x8 + \ / \ / + 1 \ / 1 \ / + \/ \/ + /\ /\ + 1 / \ 1 / \ + / \ S / \ Rd + x1 --- x3 ---> x5 --- x7 --- x9 + -1 -1 - // With Softmax, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + */ - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.2999, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.4001, 0.0001 ) ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); - // With Softmax, case 1 - input[0] = 1; - input[1] = -3; + // Mark layer dependencies + for ( unsigned i = 1; i <= 4; ++i ) + nlr.addLayerDependency( i - 1, i ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.1192, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.7615, 0.0001 ) ); + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); - // With Softmax, case 2 - input[0] = -3; - input[1] = 3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.1206, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.7588, 0.0001 ) ); - } - - void test_evaluate_bilinear() - { - NLR::NetworkLevelReasoner nlr; + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, 1 ); + nlr.setWeight( 2, 1, 3, 
0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); - populateNetworkWithBilinear( nlr ); + // Mark the Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); - double input[2]; - double output[1]; + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - // With Bilinear, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - // With Bilinear, case 1 - input[0] = 0.1; - input[1] = -0.3; + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 2.8304, 0.0001 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - // With Bilinear, case 2 - input[0] = -0.3; - input[1] = 0.3; + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.0912, 0.0001 ) ); + tableau.getBoundManager().initialize( 10 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + 
tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); } - void test_evaluate_non_consecutive_layers() + void populateNetworkSBTMax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; + /* + + 1 R Max 2 + x0 --- x2 ---> x4 --- x6 ---> x7 + \ / / + 1 \ / / + \/ / + /\ / + 1 / \ / + / \ R / + x1 --- x3 ---> x5 + -1 + + */ // Create the layers nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::MAX, 1 ); + nlr.addLayer( 4, NLR::Layer::WEIGHTED_SUM, 1 ); // Mark layer dependencies - nlr.addLayerDependency( 0, 1 ); - nlr.addLayerDependency( 1, 2 ); - nlr.addLayerDependency( 2, 3 ); - nlr.addLayerDependency( 0, 3 ); - nlr.addLayerDependency( 3, 4 ); - nlr.addLayerDependency( 0, 4 ); - nlr.addLayerDependency( 4, 5 ); + for ( unsigned i = 1; i <= 4; ++i ) + nlr.addLayerDependency( i - 1, i ); - // Set the weights and relus + // Set the weights and biases for the weighted sum layers nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + nlr.setWeight( 3, 0, 4, 0, 2 ); + // Mark the ReLU sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, 2 ); - nlr.setWeight( 2, 2, 3, 1, -2 ); - nlr.setWeight( 0, 1, 3, 1, 1 ); - 
- nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 0, 0, 4, 2 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 2, 5, 0, 1 ); - - // Evaluate - double input[2]; - double output; - - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); - TS_ASSERT( FloatUtils::areEqual( output, 2 ) ); - - input[0] = -1; - input[1] = 2; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); - TS_ASSERT( FloatUtils::areEqual( output, 0 ) ); + // Mark the Max sources + nlr.addActivationSource( 2, 0, 3, 0 ); + nlr.addActivationSource( 2, 1, 3, 0 ); - TS_ASSERT_THROWS_NOTHING( nlr.computeSuccessorLayers() ); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 7 ); + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT_EQUALS( nlr.getLayer( 0 )->getSuccessorLayers(), Set( { 1, 3, 4 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 1 )->getSuccessorLayers(), Set( { 2 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 2 )->getSuccessorLayers(), Set( { 3 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 3 )->getSuccessorLayers(), Set( { 4 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getSuccessorLayers(), Set( { 5 } ) ); + tableau.getBoundManager().initialize( 8 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + 
tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); } - - void test_evaluate_abs_and_relu() + + void populateNetworkSBTSoftmax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; + /* - populateNetworkWithAbsAndRelu( nlr ); - - double input[2]; - double output[2]; - input[0] = 1; - input[1] = 1; + x0 x3 S x6 - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + x1 x4 S x7 - TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); + x2 x5 S x8 - input[0] = 1; - input[1] = 2; + x3 = x0 - x1 + x2 + 1 + x4 = -x0 + x1 + x2 + 2 + x5 = -x0 - x1 - x2 + 3 - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + x6 x7 x8 = softmax(x3, x4, x5) - TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - } - - void test_evaluate_round_and_sign() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithRoundAndSign( nlr ); - - double input[2]; - double output[2]; + x9 = x6 + x7 + x8 + x10 = x6 + x7 + x8 - input[0] = 1.6; - input[1] = 1.4; + */ - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -2, 0.0001 ) ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); - input[0] = 1.6; - input[1] = 1.6; + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -1 ); + nlr.setWeight( 0, 0, 1, 2, -1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + 
nlr.setWeight( 0, 1, 1, 2, -1 ); + nlr.setWeight( 0, 2, 1, 0, 1 ); + nlr.setWeight( 0, 2, 1, 1, 1 ); + nlr.setWeight( 0, 2, 1, 2, -1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 1, 1, 2 ); + nlr.setBias( 1, 2, 3 ); - TS_ASSERT( FloatUtils::areEqual( output[0], -1, 0.0001) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); - } - - void test_evaluate_leaky_relu_and_sigmoid() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithLeakyReluAndSigmoid( nlr ); - - double input[2]; - double output[2]; + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 2 ); - input[0] = 1; - input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.7109, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 1.4602, 0.0001 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - input[0] = 1; - input[1] = 2; + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 8 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); 
+ nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.4013, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 0.6508, 0.0001 ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 11 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); } - void test_evaluate_softmax_and_max() + void populateNetworkSBTSoftmax2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSoftmaxAndMax( nlr ); - - double input[2]; - double output[1]; + /* - input[0] = 1; - input[1] = -3; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + x0 x3 S x8 - TS_ASSERT( FloatUtils::areEqual( output[0], -2.9998, 0.0001 ) ); + x1 x4 S x9 - input[0] = -3; - input[1] = 3; + x2 x5 S x10 - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + x6 S x11 - TS_ASSERT( FloatUtils::areEqual( output[0], -1.0000, 0.0001 ) ); - } - - void test_evaluate_relu_and_bilinear() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithReluAndBilinear( nlr ); - - double input[2]; - double output[1]; + x7 S x12 - input[0] = 1; - input[1] = 1; + x3 = x0 - x1 + x2 + 1 + x4 = -x0 + x1 + x2 + 2 + x5 = -x0 - x1 - x2 + 3 + x6 = -x0 - x1 - x2 + 2 + x7 = -x0 - x1 - x2 + 1 - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); 
+ x8 x10 x12 = softmax(x3, x5, x7) - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + x9 x11 = softmax(x4, x6) - input[0] = 1; - input[1] = 2; + x13 = x8 + x10 + x12 + x14 = -x8 - x10 - x12 + x15 = x9 + x11 + x16 = -x9 - x11 - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + */ - TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); - } + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 5 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 5 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 4 ); - void test_store_into_other() - { - NLR::NetworkLevelReasoner nlr; + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); - populateNetwork( nlr ); + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -1 ); + nlr.setWeight( 0, 0, 1, 2, -1 ); + nlr.setWeight( 0, 0, 1, 3, -1 ); + nlr.setWeight( 0, 0, 1, 4, -1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 2, -1 ); + nlr.setWeight( 0, 1, 1, 3, -1 ); + nlr.setWeight( 0, 1, 1, 4, -1 ); + nlr.setWeight( 0, 2, 1, 0, 1 ); + nlr.setWeight( 0, 2, 1, 1, 1 ); + nlr.setWeight( 0, 2, 1, 2, -1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + nlr.setWeight( 0, 2, 1, 4, -1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 4, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + nlr.setWeight( 2, 4, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 2, 1 ); + nlr.setWeight( 2, 3, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 3, -1 ); + nlr.setWeight( 2, 3, 3, 3, -1 ); - NLR::NetworkLevelReasoner nlr2; + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 1, 1, 2 ); + nlr.setBias( 1, 2, 3 ); + nlr.setBias( 1, 3, 2 ); + nlr.setBias( 1, 4, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + 
nlr.addActivationSource( 1, 4, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 4, 2, 2 ); + nlr.addActivationSource( 1, 0, 2, 4 ); + nlr.addActivationSource( 1, 2, 2, 4 ); + nlr.addActivationSource( 1, 4, 2, 4 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 3 ); + nlr.addActivationSource( 1, 3, 2, 3 ); - double input[2]; - double output1[2]; - double output2[2]; + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - // Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 4 ), 7 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 4 ), 12 ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 13 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 3 ), 16 ); - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - // With ReLUs, Inputs are zeros, only biases 
count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + tableau.getBoundManager().initialize( 17 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + } + + void populateNetworkSBTBilinear( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - // With ReLUs, case 1 - input[0] = 1; - input[1] = 1; + x0 x2 + x x4 -- x5 + x1 x3 - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + x2 = x0 - 2 * x1 + x3 = x0 + x1 + x4 = -x5 - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } + x4 = x2 * x3 + */ - void test_store_into_other_with_sigmoids() - { - NLR::NetworkLevelReasoner nlr; + // 
Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - populateNetworkWithSigmoids( nlr ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); - NLR::NetworkLevelReasoner nlr2; + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -2 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, -1 ); - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); - double input[2]; - double output1[2]; - double output2[2]; - // case 1 - input[0] = 0; - input[1] = 0; + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - // case 2 - input[0] = 1; - input[1] = 1; + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 5 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + tableau.getBoundManager().initialize( 6 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); 
+ tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); } - - void test_store_into_other_with_round() + + void test_evaluate_relu() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithRound( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + populateNetwork( nlr ); double input[2]; - double output1[2]; - double output2[2]; + double output[2]; - // case 1 + // With ReLUs, Inputs are zeros, only biases count input[0] = 0; input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - // case 2 + // With ReLUs, case 1 input[0] = 1; input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 1 ) ); + + // With ReLUs, case 2 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0 ) ); } - - void test_store_into_other_with_sign() + + void test_evaluate_sigmoids() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithSign( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + 
populateNetworkWithSigmoids( nlr ); double input[2]; - double output1[2]; - double output2[2]; + double output[2]; // case 1 input[0] = 0; input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.6750, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 3.0167, 0.0001 ) ); // case 2 input[0] = 1; input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.6032, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.5790, 0.0001 ) ); + + // case 3 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.5045, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.1957, 0.0001 ) ); } - void test_store_into_other_with_abs() + void test_evaluate_abs() { NLR::NetworkLevelReasoner nlr; populateNetworkWithAbs( nlr ); - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - double input[2]; - double output1[2]; - double output2[2]; + double output[2]; - // case 1 + // With Abs, Inputs are zeros, only biases count input[0] = 0; input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( 
FloatUtils::areEqual( output1[1], output2[1] ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - // case 2 + // With Abs, case 1 + input[0] = -2; + input[1] = -2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Abs, case 2 input[0] = 1; - input[1] = 1; + input[1] = 2; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 10 ) ); } - void test_store_into_other_with_leaky_relu() + void test_evaluate_sign() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithLeakyRelu( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + populateNetworkWithSign( nlr ); double input[2]; - double output1[2]; - double output2[2]; + double output[2]; - // case 1 + // With Sign, Inputs are zeros, only biases count input[0] = 0; input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - // case 2 - input[0] = 1; + // With Sign, case 1 + input[0] = -2; + input[1] = -2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( 
output[1], 4 ) ); + + // With Sign, case 2 (0 considered "non-negative", sign(0)=1) + input[0] = -1; input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], -1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4 ) ); } - void test_store_into_other_with_max() + void test_evaluate_round() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithMax( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + populateNetworkWithRound( nlr ); double input[2]; - double output1[2]; - double output2[2]; + double output[2]; - // case 1 + // With Round, Inputs are zeros, only biases count input[0] = 0; input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - // case 2 - input[0] = 1; - input[1] = 1; + // With Round, case 1 + input[0] = 2.1; + input[1] = 1.4; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 2, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); + + // With Round, case 2 + input[0] = 2.1; + input[1] = 1.6; + + 
TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -12, 0.0001 ) ); } - void test_store_into_other_with_softmax() + void test_evaluate_leaky_relu() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithSoftmax( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + populateNetworkWithLeakyRelu( nlr ); double input[2]; - double output1[2]; - double output2[2]; + double output[2]; - // case 1 + // With Leaky ReLU, Inputs are zeros, only biases count input[0] = 0; input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - // case 2 + // With Leaky ReLU, case 1 (alpha=0.1) input[0] = 1; input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.9, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0.57, 0.0001 ) ); + + // With Leaky ReLU, case 2 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -0.04, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -0.76, 0.0001 ) ); } - void test_store_into_other_with_bilinear() + void test_evaluate_max() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithBilinear( nlr ); - - 
NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + populateNetworkWithMax( nlr ); double input[2]; - double output1[2]; - double output2[2]; + double output[1]; - // case 1 + // With Max, Inputs are zeros, only biases count input[0] = 0; input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], -3 ) ); - // case 2 + // With Max, case 1 input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + input[1] = -3; - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - void populateNetworkSBT( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - 2 R 1 - x0 --- x2 ---> x4 --- x6 - \ / / - 1 \ / / - \/ -1 / - /\ / - 3 / \ / - / \ R / - x1 --- x3 ---> x5 - 1 - */ + TS_ASSERT( FloatUtils::areEqual( output[0], -18 ) ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + // With Max, case 2 + input[0] = -3; + input[1] = 3; - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); + TS_ASSERT( 
FloatUtils::areEqual( output[0], -5 ) ); + } + + + void test_evaluate_softmax() + { + NLR::NetworkLevelReasoner nlr; - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); + populateNetworkWithSoftmax( nlr ); - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + double input[2]; + double output[2]; - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + // With Softmax, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.2999, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.4001, 0.0001 ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + // With Softmax, case 1 + input[0] = 1; + input[1] = -3; - // Very loose bounds for neurons except inputs - double large = 1000000; + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.7615, 0.0001 ) ); - tableau.getBoundManager().initialize( 7 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); + // With Softmax, case 2 + input[0] = -3; + input[1] = 3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.1206, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.7588, 0.0001 ) ); } 
- - void test_sbt_relus_all_active() + + void test_evaluate_bilinear() { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBT( nlr, tableau ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); + populateNetworkWithBilinear( nlr ); - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + double input[2]; + double output[1]; - /* - Input ranges: + // With Bilinear, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; - x0: [4, 6] - x1: [1, 5] + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); - Layer 1: + // With Bilinear, case 1 + input[0] = 0.1; + input[1] = -0.3; - x2.lb = 2x0 + 3x1 : [11, 27] - x2.ub = 2x0 + 3x1 : [11, 27] + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 2.8304, 0.0001 ) ); - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] + // With Bilinear, case 2 + input[0] = -0.3; + input[1] = 0.3; - Both ReLUs active, bound survive through activations: + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.0912, 0.0001 ) ); + } + + void test_evaluate_non_consecutive_layers() + { + NLR::NetworkLevelReasoner nlr; - x4.lb = 2x0 + 3x1 : [11, 27] - x4.ub = 2x0 + 3x1 : [11, 27] + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] + // Mark layer dependencies + nlr.addLayerDependency( 
0, 1 ); + nlr.addLayerDependency( 1, 2 ); + nlr.addLayerDependency( 2, 3 ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 3, 4 ); + nlr.addLayerDependency( 0, 4 ); + nlr.addLayerDependency( 4, 5 ); - Layer 2: + // Set the weights and relus + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); - x6.lb = x0 + 2x1 : [6, 16] - x6.ub = x0 + 2x1 : [6, 16] - */ - - List expectedBounds( { - Tightening( 2, 11, Tightening::LB ), - Tightening( 2, 27, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); - Tightening( 4, 11, Tightening::LB ), - Tightening( 4, 27, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, 2 ); + nlr.setWeight( 2, 2, 3, 1, -2 ); + nlr.setWeight( 0, 1, 3, 1, 1 ); - Tightening( 6, 6, Tightening::LB ), - Tightening( 6, 16, Tightening::UB ), - } ); + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 0, 0, 4, 2 ); - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 2, 5, 0, 1 ); - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : expectedBounds ) - TS_ASSERT( bounds.exists( bound ) ); - } + // Evaluate + double input[2]; + double output; - void test_sbt_relus_active_and_inactive() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + input[0] = 1; + input[1] = 1; - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBT( nlr, tableau ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); + 
TS_ASSERT( FloatUtils::areEqual( output, 2 ) ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); + input[0] = -1; + input[1] = 2; - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -30 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); + TS_ASSERT( FloatUtils::areEqual( output, 0 ) ); - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + TS_ASSERT_THROWS_NOTHING( nlr.computeSuccessorLayers() ); - /* - Input ranges: + TS_ASSERT_EQUALS( nlr.getLayer( 0 )->getSuccessorLayers(), Set( { 1, 3, 4 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 1 )->getSuccessorLayers(), Set( { 2 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 2 )->getSuccessorLayers(), Set( { 3 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 3 )->getSuccessorLayers(), Set( { 4 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getSuccessorLayers(), Set( { 5 } ) ); + } - x0: [4, 6] - x1: [1, 5] + void test_evaluate_abs_and_relu() + { + NLR::NetworkLevelReasoner nlr; - Layer 1: + populateNetworkWithAbsAndRelu( nlr ); + + double input[2]; + double output[2]; - x2.lb = 2x0 + 3x1 - 30 : [-19, -3] - x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + input[0] = 1; + input[1] = 1; - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - First ReLU is inactive, bounds get zeroed - Second ReLU is active, bounds surive the activation + TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); - x4.lb = 0 - x4.ub = 0 + input[0] = 1; + input[1] = 2; - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - Layer 2: + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + } + + void test_evaluate_round_and_sign() + { + 
NLR::NetworkLevelReasoner nlr; + + populateNetworkWithRoundAndSign( nlr ); + + double input[2]; + double output[2]; - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 : [-11, -5] - */ + input[0] = 1.6; + input[1] = 1.4; - List expectedBounds( { - Tightening( 2, -19, Tightening::LB ), - Tightening( 2, -3, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 0, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), + TS_ASSERT( FloatUtils::areEqual( output[0], 1, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -2, 0.0001 ) ); - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, -5, Tightening::UB ), - } ); + input[0] = 1.6; + input[1] = 1.6; - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], -1, 0.0001) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); } - - void test_sbt_relus_active_and_not_fixed() + + void test_evaluate_leaky_relu_and_sigmoid() { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBT( nlr, tableau ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); + + populateNetworkWithLeakyReluAndSigmoid( nlr ); + + double input[2]; + double output[2]; - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -15 ); + input[0] = 1; + input[1] = 1; - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - 
TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - /* - Input ranges: + TS_ASSERT( FloatUtils::areEqual( output[0], 0.7109, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 1.4602, 0.0001 ) ); - x0: [4, 6] - x1: [1, 5] + input[0] = 1; + input[1] = 2; - Layer 1: + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + TS_ASSERT( FloatUtils::areEqual( output[0], 0.4013, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0.6508, 0.0001 ) ); + } + + void test_evaluate_softmax_and_max() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmaxAndMax( nlr ); + + double input[2]; + double output[1]; - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] + input[0] = 1; + input[1] = -3; - First ReLU is undecided, bound is concretized. - Coefficient: 12/(12--4) = 12/16 = 0.75 - Second ReLU is active, bounds surive the activation + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - x4 range: [0, 12] - x4.lb = 0.75( 2x0 + 3x1 ) - 0.75 * 15 = 1.5x0 + 2.25x1 - 11.25 - x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 + TS_ASSERT( FloatUtils::areEqual( output[0], -2.9998, 0.0001 ) ); - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] + input[0] = -3; + input[1] = 3; - Layer 2: + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - x6.lb = 0.5x0 + 1.25x1 - 11.25 - x6.ub = 0.5x0 + 1.25x1 - 8.25 + TS_ASSERT( FloatUtils::areEqual( output[0], -1.0000, 0.0001 ) ); + } + + void test_evaluate_relu_and_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithReluAndBilinear( nlr ); + + double input[2]; + double output[1]; - x6 range: [2 + 1.25 - 11.25 = -8, 3 + 6.25 - 8.25 = 1] = [-8, 1] - */ + input[0] = 1; + input[1] = 1; - List expectedBounds( { - Tightening( 2, -4, Tightening::LB ), - Tightening( 2, 12, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), 
- Tightening( 3, 11, Tightening::UB ), + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 12, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, 1, Tightening::UB ), - } ); + input[0] = 1; + input[1] = 2; - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); } - void test_sbt_relus_active_and_externally_fixed() + void test_store_into_other() { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBT( nlr, tableau ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); + populateNetwork( nlr ); - // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
- nlr.setBias( 1, 0, -15 ); + NLR::NetworkLevelReasoner nlr2; - // However, one of the ReLU's variables has been eliminated - nlr.eliminateVariable( 2, -3 ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + double input[2]; + double output1[2]; + double output2[2]; - /* - Input ranges: + // Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; - x0: [4, 6] - x1: [1, 5] + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - Layer 1: + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] + // With ReLUs, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; - First ReLU is inactive (set externally), bounds get zeroed - Second ReLU is active, bounds surive the activation + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - x4.lb = 0 - x4.ub = 0 + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] + // With ReLUs, case 1 + input[0] = 1; + input[1] = 1; - Layer 2: + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 : [-11, -5] - */ + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } - List expectedBounds( { - // x2 does not appear, because it has been eliminated + void 
test_store_into_other_with_sigmoids() + { + NLR::NetworkLevelReasoner nlr; - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + populateNetworkWithSigmoids( nlr ); - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 0, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), + NLR::NetworkLevelReasoner nlr2; - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, -5, Tightening::UB ), - } ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + double input[2]; + double output1[2]; + double output2[2]; - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } + // case 1 + input[0] = 0; + input[1] = 0; - void test_sbt_abs_all_positive() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + // case 2 + input[0] = 1; + input[1] = 1; - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); 
- nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_round() + { + NLR::NetworkLevelReasoner nlr; - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); + populateNetworkWithRound( nlr ); - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + NLR::NetworkLevelReasoner nlr2; - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + double input[2]; + double output1[2]; + double output2[2]; - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + // case 1 + input[0] = 0; + input[1] = 0; - // Very loose bounds for neurons except inputs - double large = 1000000; + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); + // case 2 + input[0] = 1; + input[1] = 1; - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - 
TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - /* - Input ranges: + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_sign() + { + NLR::NetworkLevelReasoner nlr; - x0: [4, 6] - x1: [1, 5] + populateNetworkWithSign( nlr ); - Layer 1: + NLR::NetworkLevelReasoner nlr2; - x2.lb = 2x0 + 3x1 : [11, 27] - x2.ub = 2x0 + 3x1 : [11, 27] + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] + double input[2]; + double output1[2]; + double output2[2]; - Both absolute values positive, bound survive through activations: + // case 1 + input[0] = 0; + input[1] = 0; - x4.lb = 2x0 + 3x1 : [11, 27] - x4.ub = 2x0 + 3x1 : [11, 27] + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - Layer 2: + // case 2 + input[0] = 1; + input[1] = 1; - x6.lb = x0 + 2x1 : [6, 16] - x6.ub = x0 + 2x1 : [6, 16] - */ + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - List expectedBounds( { - Tightening( 2, 11, Tightening::LB ), - Tightening( 2, 27, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_abs() + { + NLR::NetworkLevelReasoner nlr; - Tightening( 4, 11, Tightening::LB ), - Tightening( 4, 27, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - 
Tightening( 5, 11, Tightening::UB ), + populateNetworkWithAbs( nlr ); - Tightening( 6, 6, Tightening::LB ), - Tightening( 6, 16, Tightening::UB ), - } ); + NLR::NetworkLevelReasoner nlr2; - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } + double input[2]; + double output1[2]; + double output2[2]; - void test_sbt_abs_positive_and_negative() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + // case 1 + input[0] = 0; + input[1] = 0; - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); + // case 2 + input[0] = 1; + input[1] = 1; - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( 
FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + populateNetworkWithLeakyRelu( nlr ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + NLR::NetworkLevelReasoner nlr2; - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + double input[2]; + double output1[2]; + double output2[2]; - // Very loose bounds for neurons except inputs - double large = 1000000; + // case 1 + input[0] = 0; + input[1] = 0; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -30 ); + // case 2 + input[0] = 1; + input[1] = 1; - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, 
output2 ) ); - /* - Input ranges: + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_max() + { + NLR::NetworkLevelReasoner nlr; - x0: [4, 6] - x1: [1, 5] + populateNetworkWithMax( nlr ); - Layer 1: + NLR::NetworkLevelReasoner nlr2; - x2.lb = 2x0 + 3x1 - 30 : [-19, -3] - x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] + double input[2]; + double output1[2]; + double output2[2]; - First absolute value is negative, bounds get flipped - Second absolute value is positive, bounds surive the activation + // case 1 + input[0] = 0; + input[1] = 0; - x4.lb = -2x0 -3x1 + 30 : [3, 19] - x4.ub = -2x0 -3x1 + 30 : [3, 19] + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - Layer 2: + // case 2 + input[0] = 1; + input[1] = 1; - x6.lb = - 3x0 - 4x1 + 30 : [-8, 14] - x6.ub = - 3x0 - 4x1 + 30 : [-8, 14] - */ + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - List expectedBounds( { - Tightening( 2, -19, Tightening::LB ), - Tightening( 2, -3, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_softmax() + { + NLR::NetworkLevelReasoner nlr; - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 19, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), + populateNetworkWithSoftmax( nlr ); - Tightening( 
6, -8, Tightening::LB ), - Tightening( 6, 14, Tightening::UB ), - } ); + NLR::NetworkLevelReasoner nlr2; - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } + double input[2]; + double output1[2]; + double output2[2]; - void test_sbt_absolute_values_positive_and_not_fixed() + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_bilinear() { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithBilinear( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + TS_ASSERT( FloatUtils::areEqual( output1[0], 
output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_generate_input_query() + { NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); // Create the layers nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) + for ( unsigned i = 1; i <= 5; ++i ) nlr.addLayerDependency( i - 1, i ); - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - // Very loose bounds for neurons except inputs - double large = 1000000; + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 
); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -15 ); + // Set the weights and biases for the weighted sum layers - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First absolute value is undecided, bounds are concretized. 
- Second ReLU is active, bounds surive the activation - - x4 range: [0, 12] - x4.lb = 0 - x4.ub = 12 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 + 12 : [ 1, 7] - - x6 range: [-11, 7] - */ - - List expectedBounds( { - Tightening( 2, -4, Tightening::LB ), - Tightening( 2, 12, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 12, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, 7, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } - - void test_sbt_absolute_values_active_and_externally_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 
), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. - nlr.setBias( 1, 0, -15 ); - - // However, the weighted sum variable has been eliminated - nlr.eliminateVariable( 2, -3 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2 is eliminated, everything set to -3 - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - Second absolute value is positive, bounds surive the activation - - x4: all set to 3 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 + 3 : [-8, -2] - x6.ub = - x0 - x1 + 3 : [-8, -2] - */ - - List expectedBounds( { - // x2 does not appear, because it has been eliminated - - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -8, Tightening::LB ), - 
Tightening( 6, -2, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - printf( "Dumpign discovered bounds:\n" ); - for ( const auto &bound : bounds ) - bound.dump(); - - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : bounds ) - TS_ASSERT( expectedBounds.exists( bound ) ); - } - - void test_generate_input_query() - { - NLR::NetworkLevelReasoner nlr; - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Variable indexing - - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - - // Set the weights and biases for the weighted sum layers - - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 
); + nlr.setWeight( 0, 1, 1, 2, 1 ); nlr.setWeight( 2, 0, 3, 0, 1 ); nlr.setWeight( 2, 0, 3, 1, -1 ); @@ -3783,47 +3809,1162 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite simulations1.append( Vector( simulationSize, 1 ) ); simulations1.append( Vector( simulationSize, 1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + } + + // With ReLU/Bilinear, case 2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0 ) ); + } + } + + void test_interval_arithmetic_bound_propagation_relu_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetwork( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 
9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + 
tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, 
expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_abs_constraints() + { + NLR::NetworkLevelReasoner nlr; + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the AbsoluteValue sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Layer dependencies (NOTE(review): already registered in the loop above — verify addLayerDependency is idempotent) + nlr.addLayerDependency( 0, 1 ); + nlr.addLayerDependency( 1, 2 ); + nlr.addLayerDependency( 2, 3 ); + nlr.addLayerDependency( 3, 4 ); + nlr.addLayerDependency( 4, 5 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -8, Tightening::LB ), 
Tightening( 4, 7, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 8, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 41, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_sign_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSign( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + 
tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, 3 ); + tableau.setUpperBound( 0, 4 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 
); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, 4, Tightening::LB ), Tightening( 2, 5, Tightening::UB ), + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 11, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, 1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, 1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, 4, Tightening::LB ), 
Tightening( 13, 4, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_leaky_relu_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithLeakyRelu( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), + Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), 
Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), + Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), + + Tightening( 9, -0.28, Tightening::LB ), Tightening( 9, 10.1, Tightening::UB ), + Tightening( 11, -0.38, Tightening::LB ), Tightening( 11, 9.1, Tightening::UB ), + + Tightening( 12, -0.28, Tightening::LB ), Tightening( 12, 10.1, Tightening::UB ), + Tightening( 13, -1.42, Tightening::LB ), Tightening( 13, 37.4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // 
Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), + Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), + + Tightening( 9, -0.34, Tightening::LB ), Tightening( 9, 7.1, Tightening::UB ), + Tightening( 11, -0.32, Tightening::LB ), Tightening( 11, 7.3, Tightening::UB ), + + Tightening( 12, -0.34, Tightening::LB ), Tightening( 12, 7.1, Tightening::UB ), + Tightening( 13, -1.3, Tightening::LB ), Tightening( 13, 29, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_round_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithRound( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, 1.4 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, -1.4 ); + tableau.setUpperBound( 1, 2.1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large 
); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), + Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), + Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), + + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), + Tightening( 11, -7, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), + Tightening( 13, -25, Tightening::LB ), Tightening( 13, 35, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3.1 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, 1.4 ); + 
tableau.setUpperBound( 1, 2.1 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), + Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), + Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + + Tightening( 9, -16, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 11, -15, Tightening::LB ), Tightening( 11, 2, Tightening::UB ), + + Tightening( 12, -16, Tightening::LB ), Tightening( 12, 1, 
Tightening::UB ), + Tightening( 13, -61, Tightening::LB ), Tightening( 13, 7, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_sigmoid_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSigmoids( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, 
Tightening::UB ), + + Tightening( 3, 0.5000, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), + Tightening( 5, 0.0067, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), + Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.7311, Tightening::UB ), + + Tightening( 8, -0.2244, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), + Tightening( 10, 0.3948, Tightening::LB ), Tightening( 10, 2.2244, Tightening::UB ), + + Tightening( 9, 0.4441, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), + Tightening( 11, 0.5974, Tightening::LB ), Tightening( 11, 0.9024, Tightening::UB ), + + Tightening( 12, 0.4441, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ), + Tightening( 13, 2.2364, Tightening::LB ), Tightening( 13, 3.5399, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + 
tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.1192, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), + Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), + + Tightening( 8, -0.7616, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), + Tightening( 10, 0.2384, Tightening::LB ), Tightening( 10, 2.6052, Tightening::UB ), + + Tightening( 9, 0.3183, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), + Tightening( 11, 0.5593, Tightening::LB ), Tightening( 11, 0.9312, Tightening::UB ), + + Tightening( 12, 0.3183, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ), + Tightening( 13, 1.9963, Tightening::LB ), Tightening( 13, 3.6263, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_max_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithMax( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, 
large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 10, Tightening::UB ), + Tightening( 9, -8, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), + + Tightening( 11, -10, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 
3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -8, Tightening::LB ), Tightening( 3, 9, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 9, Tightening::UB ), + Tightening( 7, -5, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 16, Tightening::UB ), + Tightening( 9, -14, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -5, Tightening::LB ), Tightening( 10, 16, Tightening::UB ), + + Tightening( 11, -16, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_softmax_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSoftmax( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + 
tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), + Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), + + Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), + Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), + + Tightening( 9, 0.0240, Tightening::LB 
), Tightening( 9, 0.8349, Tightening::UB ), + Tightening( 11, 0.1651, Tightening::LB ), Tightening( 11, 0.9759, Tightening::UB ), + + Tightening( 12, 0.0240, Tightening::LB ), Tightening( 12, 0.8349, Tightening::UB ), + Tightening( 13, 0.5192, Tightening::LB ), Tightening( 13, 3.7629, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - } + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, 
Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), - // With ReLU/Bilinear, case 2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 2 ) ); + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ), + + Tightening( 9, 0.0184, Tightening::LB ), Tightening( 9, 0.8678, Tightening::UB ), + Tightening( 11, 0.1322, Tightening::LB ), Tightening( 11, 0.9816, Tightening::UB ), - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + Tightening( 12, 0.0184, Tightening::LB ), Tightening( 12, 0.8678, Tightening::UB ), + Tightening( 13, 0.4151, Tightening::LB ), Tightening( 13, 3.8125, Tightening::UB ), + } ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - void test_interval_arithmetic_bound_propagation_relu_constraints() + void test_interval_arithmetic_bound_propagation_bilinear_constraints() { NLR::NetworkLevelReasoner nlr; - populateNetwork( nlr ); - + populateNetworkWithBilinear( nlr ); + + MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + tableau.getBoundManager().initialize( 12 ); // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - 
tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 0, -0.1 ); + tableau.setUpperBound( 0, 0.1 ); + tableau.setLowerBound( 1, -0.1 ); + tableau.setUpperBound( 1, 0.1 ); double large = 1000; tableau.setLowerBound( 2, -large ); @@ -3846,10 +4987,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 10, large ); tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); nlr.setTableau( &tableau ); @@ -3860,34 +4997,32 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + Tightening( 2, 0.9, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ), + Tightening( 3, -0.5, Tightening::LB ), Tightening( 3, 0.5, Tightening::UB ), + Tightening( 4, -0.3, Tightening::LB ), Tightening( 4, 0.3, Tightening::UB ), + Tightening( 5, -0.3, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), + + Tightening( 6, -0.55, Tightening::LB ), Tightening( 6, 0.55, Tightening::UB ), + Tightening( 7, -0.09, Tightening::LB ), Tightening( 7, 
0.09, Tightening::UB ), - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + Tightening( 8, 1.36, Tightening::LB ), Tightening( 8, 2.64, Tightening::UB ), + Tightening( 9, -0.64, Tightening::LB ), Tightening( 9, 0.64, Tightening::UB ), + + Tightening( 10, -1.6896, Tightening::LB ), Tightening( 10, 1.6896, Tightening::UB ), + + Tightening( 11, -1.6896, Tightening::LB ), Tightening( 11, 1.6896, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 0, -0.3 ); + tableau.setUpperBound( 0, 0.1 ); + tableau.setLowerBound( 1, -0.1 ); + tableau.setUpperBound( 1, 0.2 ); tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); @@ -3909,10 +5044,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 10, large ); tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); // Initialize TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -3921,109 +5052,39 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, 
Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + Tightening( 2, 0.7, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ), + Tightening( 3, -0.8, Tightening::LB ), Tightening( 3, 0.9, Tightening::UB ), + Tightening( 4, -0.5, Tightening::LB ), Tightening( 4, 0.5, Tightening::UB ), + Tightening( 5, -0.6, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), + + Tightening( 6, -0.88, Tightening::LB ), Tightening( 6, 0.99, Tightening::UB ), + Tightening( 7, -0.3, Tightening::LB ), Tightening( 7, 0.3, Tightening::UB ), - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + Tightening( 8, 0.82, Tightening::LB ), Tightening( 8, 3.29, Tightening::UB ), + Tightening( 9, -1.29, Tightening::LB ), Tightening( 9, 1.18, Tightening::UB ), + + Tightening( 10, -4.2441, Tightening::LB ), Tightening( 10, 3.8822, Tightening::UB ), + + Tightening( 11, -3.8822, Tightening::LB ), Tightening( 11, 4.2441, Tightening::UB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - - void test_interval_arithmetic_bound_propagation_abs_constraints() + + void test_interval_arithmetic_bound_propagation_abs_and_relu_constraints() { NLR::NetworkLevelReasoner nlr; - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); 
- - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Layer dependenices - nlr.addLayerDependency( 0, 1 ); - nlr.addLayerDependency( 1, 2 ); - nlr.addLayerDependency( 2, 3 ); - nlr.addLayerDependency( 3, 4 ); - nlr.addLayerDependency( 4, 5 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + populateNetworkWithAbsAndRelu( nlr ); MockTableau tableau; 
tableau.getBoundManager().initialize( 14 ); // Initialize the bounds tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 2 ); + tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + tableau.setUpperBound( 1, 1 ); double large = 1000; tableau.setLowerBound( 2, -large ); @@ -4060,23 +5121,23 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 8, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - Tightening( 10, -3, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + Tightening( 10, -5, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB 
), - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 41, Tightening::UB ), + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), } ); List bounds; @@ -4121,42 +5182,42 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 
56, Tightening::UB ), - } ); + Tightening( 10, -10, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - void test_interval_arithmetic_bound_propagation_sign_constraints() + void test_interval_arithmetic_bound_propagation_round_and_sign_constraints() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithSign( nlr ); - + populateNetworkWithRoundAndSign( nlr ); + MockTableau tableau; tableau.getBoundManager().initialize( 14 ); // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 0, 1.4 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, -1.4 ); + tableau.setUpperBound( 1, 2.1 ); double large = 1000; tableau.setLowerBound( 2, -large ); @@ -4193,34 +5254,34 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, 
Tightening::UB ), + Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), + Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), + Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), + + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), - Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); // Change the current bounds - tableau.setLowerBound( 0, 3 ); - tableau.setUpperBound( 0, 4 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 0, -3.1 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, 1.4 ); + tableau.setUpperBound( 1, 2.1 ); tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); @@ -4254,33 +5315,32 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); List expectedBounds2( { - Tightening( 2, 4, Tightening::LB ), Tightening( 2, 5, Tightening::UB ), - Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, 
Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 11, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), + Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), + Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 10, 1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), - Tightening( 11, 1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), - Tightening( 12, 1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, 4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - void test_interval_arithmetic_bound_propagation_leaky_relu_constraints() + void test_interval_arithmetic_bound_propagation_leaky_relu_and_sigmoid_constraints() { 
NLR::NetworkLevelReasoner nlr; - populateNetworkWithLeakyRelu( nlr ); + populateNetworkWithLeakyReluAndSigmoid( nlr ); MockTableau tableau; tableau.getBoundManager().initialize( 14 ); @@ -4326,26 +5386,27 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), - Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), + Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), - Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), - Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), + Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), + Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), - Tightening( 9, -0.28, Tightening::LB ), Tightening( 9, 10.1, Tightening::UB ), - Tightening( 11, -0.38, Tightening::LB ), Tightening( 11, 9.1, Tightening::UB ), + Tightening( 9, 0.0573, Tightening::LB ), Tightening( 9, 0.9999, Tightening::UB ), + Tightening( 11, 0.0219, Tightening::LB ), Tightening( 11, 0.9999, Tightening::UB ), - Tightening( 12, -0.28, Tightening::LB ), Tightening( 12, 10.1, 
Tightening::UB ), - Tightening( 13, -1.42, Tightening::LB ), Tightening( 13, 37.4, Tightening::UB ), + Tightening( 12, 0.0573, Tightening::LB ), Tightening( 12, 0.9999, Tightening::UB ), + Tightening( 13, 0.1229, Tightening::LB ), Tightening( 13, 3.9996, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); // Change the current bounds @@ -4386,42 +5447,42 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), - Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), + Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), + Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), - Tightening( 9, -0.34, Tightening::LB ), Tightening( 9, 7.1, Tightening::UB ), - Tightening( 11, -0.32, Tightening::LB ), Tightening( 11, 
7.3, Tightening::UB ), + Tightening( 9, 0.0323, Tightening::LB ), Tightening( 9, 0.9992, Tightening::UB ), + Tightening( 11, 0.0392, Tightening::LB ), Tightening( 11, 0.9993, Tightening::UB ), - Tightening( 12, -0.34, Tightening::LB ), Tightening( 12, 7.1, Tightening::UB ), - Tightening( 13, -1.3, Tightening::LB ), Tightening( 13, 29, Tightening::UB ), + Tightening( 12, 0.0323, Tightening::LB ), Tightening( 12, 0.9992, Tightening::UB ), + Tightening( 13, 0.1498, Tightening::LB ), Tightening( 13, 3.9972, Tightening::UB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - void test_interval_arithmetic_bound_propagation_round_constraints() + void test_interval_arithmetic_bound_propagation_softmax_and_max_constraints() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithRound( nlr ); + populateNetworkWithSoftmaxAndMax( nlr ); MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + tableau.getBoundManager().initialize( 12 ); // Initialize the bounds - tableau.setLowerBound( 0, 1.4 ); - tableau.setUpperBound( 0, 1.6 ); - tableau.setLowerBound( 1, -1.4 ); - tableau.setUpperBound( 1, 2.1 ); + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); double large = 1000; tableau.setLowerBound( 2, -large ); @@ -4444,10 +5505,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 10, large ); tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); nlr.setTableau( &tableau ); @@ -4458,34 +5515,97 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); List expectedBounds( { - Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 
2.6, Tightening::UB ), - Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), - Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), + Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), - Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), - Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), + Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), + Tightening( 9, 0.3192, Tightening::LB ), Tightening( 9, 2.9819, Tightening::UB ), - Tightening( 9, -4, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), - Tightening( 11, -7, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - - Tightening( 12, -4, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), - Tightening( 13, -25, Tightening::LB ), Tightening( 13, 35, Tightening::UB ), + Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), + + Tightening( 11, -2.9819, Tightening::LB ), Tightening( 11, -0.3192, Tightening::UB ) } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); // Change the current bounds - tableau.setLowerBound( 0, -3.1 ); - tableau.setUpperBound( 0, 1.6 ); - tableau.setLowerBound( 1, 1.4 ); - tableau.setUpperBound( 1, 2.1 ); 
+ tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 9, 0.0654, Tightening::LB ), Tightening( 9, 2.9933, Tightening::UB ), + + Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ), + + Tightening( 11, -2.9933, Tightening::LB ), Tightening( 11, -0.0654, Tightening::UB ) + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) 
); + + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_relu_and_bilinear_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithReluAndBilinear( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + double large = 1000; tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -4506,10 +5626,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 10, large ); tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); // Initialize TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -4517,45 +5635,34 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Perform the tightening pass TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - List expectedBounds2( { - Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), - Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), - Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), - Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 3, 0, 
Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - - Tightening( 9, -16, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 11, -15, Tightening::LB ), Tightening( 11, 2, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - Tightening( 12, -16, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -61, Tightening::LB ), Tightening( 13, 7, Tightening::UB ), + Tightening( 10, -7, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), + + Tightening( 11, -49, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_sigmoid_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithSigmoids( nlr ); - - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); + tableau.setUpperBound( 1, 2 ); - double large = 1000; tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -4576,12 +5683,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 10, large ); tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); - 
tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); // Initialize TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -4589,287 +5690,608 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Perform the tightening pass TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -14, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), + + Tightening( 11, -49, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_sbt_relus_all_active() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub 
= 2x0 + 3x1 : [11, 27] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Both ReLUs active, bound survive through activations: + + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ + + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_relus_active_and_inactive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive, bounds get zeroed + Second ReLU is active, bounds surive the activation + + x4.lb = 0 + x4.ub = 0 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ + + List 
expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_relus_active_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is undecided, bound is concretized. 
+ Coefficient: 12/(12--4) = 12/16 = 0.75 + Second ReLU is active, bounds surive the activation + + x4 range: [0, 12] + x4.lb = 0.75( 2x0 + 3x1 ) - 0.75 * 15 = 1.5x0 + 2.25x1 - 11.25 + x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = 0.5x0 + 1.25x1 - 11.25 + x6.ub = 0.5x0 + 1.25x1 - 8.25 + + x6 range: [2 + 1.25 - 11.25 = -8, 3 + 6.25 - 8.25 = 1] = [-8, 1] + */ + List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 0.5000, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), - Tightening( 5, 0.0067, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), - Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.7311, Tightening::UB ), + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), - Tightening( 8, -0.2244, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), - Tightening( 10, 0.3948, Tightening::LB ), Tightening( 10, 2.2244, Tightening::UB ), - - Tightening( 9, 0.4441, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), - Tightening( 11, 0.5974, Tightening::LB ), Tightening( 11, 0.9024, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), - Tightening( 12, 0.4441, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ), - Tightening( 13, 2.2364, Tightening::LB ), Tightening( 13, 3.5399, Tightening::UB ), + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 1, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds 
) ); + } - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + void test_sbt_relus_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); - // Initialize + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
+ nlr.setBias( 1, 0, -15 ); + + // However, one of the ReLU's variables has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0.1192, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), - Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), + x0: [4, 6] + x1: [1, 5] - Tightening( 8, -0.7616, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), - Tightening( 10, 0.2384, Tightening::LB ), Tightening( 10, 2.6052, Tightening::UB ), - - Tightening( 9, 0.3183, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), - Tightening( 11, 0.5593, Tightening::LB ), Tightening( 11, 0.9312, Tightening::UB ), + Layer 1: - Tightening( 12, 0.3183, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ), - Tightening( 13, 1.9963, Tightening::LB ), Tightening( 13, 3.6263, Tightening::UB ), + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive (set externally), bounds get zeroed + Second ReLU is active, bounds surive the activation + + x4.lb = 0 + x4.ub = 0 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, 
Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - - void test_interval_arithmetic_bound_propagation_max_constraints() + + void test_sbt_relu_residual1() { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithMax( nlr ); - + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 12 ); + nlr.setTableau( &tableau ); + populateNetworkSBTReluResidual1( nlr, tableau ); - // Initialize the bounds tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - nlr.setTableau( &tableau ); + /* + Input 
ranges: - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + x0: [-1, 1] - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + Layer 1: - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + x1.lb = x0 : [-1, 1] + x1.ub = x0 : [-1, 1] - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 10, Tightening::UB ), - Tightening( 9, -8, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + ReLU is undecided, bound is concretized. + Coefficient: 1/( 1--1 ) = 1/2 = 0.5 - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), + x2.lb = 0.5x0 + x2.ub = 0.5x0 + 0.5 + x2 range: [0, 1] - Tightening( 11, -10, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + Layer 2 (with residual from x0): + + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1] + x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] + x3 range: [-1, 2.5] + + ReLU is undecided, bound is concretized. + Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. 
+ + x4.lb = 0 + x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] + x4 range: [0, 2.5] + + Layer 3 (with residual from x1): + + x5.lb = 3 ( 0 ) + 3 ( x0 ) + 1 = 3x0 + 1 : [-2, 4] + x5.ub = 3 ( -15/14 x0 + 20/14 ) + 3 ( x0 ) + 1 = -3/14 x0 + 74/14 : [71/14, 77/14 = 5.5] + + x5 range: [-2, 4] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2.5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 2.5, Tightening::UB ), + Tightening( 5, 2, Tightening::LB ), + Tightening( 5, 5.5, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_relu_residual2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluResidual2( nlr, tableau ); - // Change the current bounds - tableau.setLowerBound( 0, -3 ); + tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - 
tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - // Initialize + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -8, Tightening::LB ), Tightening( 3, 9, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 9, Tightening::UB ), - Tightening( 7, -5, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + x0: [-1, 1] - Tightening( 8, -5, Tightening::LB ), Tightening( 8, 16, Tightening::UB ), - Tightening( 9, -14, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + Layer 1: + + x1.lb = x0 : [-1, 1] + x1.ub = x0 : [-1, 1] + + ReLU is undecided, bound is concretized. + Coefficient: 1/( 1--1 ) = 1/2 = 0.5 - Tightening( 10, -5, Tightening::LB ), Tightening( 10, 16, Tightening::UB ), + x2.lb = 0.5x0 + x2.ub = 0.5x0 + 0.5 + x2 range: [0, 1] - Tightening( 11, -16, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + Layer 2 (with residual from x0): + + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1] + x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] + x3 range: [-1, 2.5] + + ReLU is undecided, bound is concretized. + Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. 
+ + x4.lb = 0 + x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] + x4 range: [0, 2.5] + + Layer 3 (with residual from x0): + + x5.lb = 3 ( 0 ) + 1 ( x0 ) + 1 = 1x0 + 1 : [0, 2] + x5.ub = 3 ( -15/14 x0 + 20/14 ) + 1 ( x0 ) + 1 = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5] + x5 range: [0, 7.5] + + Layer 4: + x6.lb = 1x0 + 1 : [0, 2] + x6.ub = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5] + x6 range: [0, 7.5] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2.5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 2.5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 7.5, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), + Tightening( 6, 7.5, Tightening::UB ), } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - void test_interval_arithmetic_bound_propagation_softmax_constraints() + void test_sbt_relu_reindex() { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithSoftmax( nlr ); - + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + nlr.setTableau( &tableau ); + populateNetworkSBTReluReindex( nlr, tableau ); - // Initialize the bounds tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, -1 ); tableau.setUpperBound( 1, 1 ); - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large 
); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - nlr.setTableau( &tableau ); + /* + Input ranges: - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + x0: [-1, 1] + x1: [-1, 1] - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + Layer 1: - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] + + Both ReLUs are undecided, bounds are concretized. 
+ Coefficient: 2/( 2--2 ) = 2/4 = 0.5 - Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), - Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), + x4.lb = 0.5 ( x0 + x1 ) = 0.5x0 + 0.5x1 + x4.ub = 0.5 ( x0 + x1 ) + 1 = 0.5x0 + 0.5x1 + 1 + x4 range: [0, 2] + + x5.lb = 0.5 ( x0 - x1 ) = 0.5x0 - 0.5x1 + x5.ub = 0.5 ( x0 - x1 ) + 1 = 0.5x0 - 0.5x1 + 1 + x5 range: [0, 2] + + Layer 2: - Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), - Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), + x6.lb = 1 ( 0.5x0 + 0.5x1 ) + 1 ( 0.5x0 - 0.5x1 ) = x0 : [-1, 1] + x6.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) + 1 ( 0.5x0 - 0.5x1 + 1 ) = x0 + 2 : [1, 3] + x6 range: [-1, 3] + + x7.lb = 1 ( 0.5x0 + 0.5x1 ) - 1 ( 0.5x0 - 0.5x1 + 1 ) = x1 - 1 : [-2, 0] + x7.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) - 1 ( 0.5x0 - 0.5x1 ) = x1 + 1 : [0, 2] + x7 range: [-2, 2] + + Both ReLUs are undecided, bounds are concretized. 
+ Coefficient (first ReLU, lower): 1/( 1--1 ) = 1/2 = 0.5 + Coefficient (first ReLU, upper): 1 (propagated as is) + Coefficient (second ReLU, lower): 0 (bound is zero) + Coefficient (second ReLU, upper): 2/( 2--2 ) = 2/4 = 0.5 - Tightening( 9, 0.0240, Tightening::LB ), Tightening( 9, 0.8349, Tightening::UB ), - Tightening( 11, 0.1651, Tightening::LB ), Tightening( 11, 0.9759, Tightening::UB ), + x8.lb = 0.5 ( x0 ) = 0.5x0 + x8.ub = x0 + 2 + x8 range: [0, 3] + + x9.lb = 0 + x9.ub = 0.5 ( x1 + 1 ) + 1 = 0.5x1 + 1.5 + x9 range: [0, 2] + + Layer 3: + + x10.lb = 1 ( 0.5x0 ) + 1 ( 0 ) + 1 = 0.5x0 + 1 : [0.5, 1.5] + x10.ub = 1 ( x0 + 2 ) + 1 ( 0.5x1 + 1.5 ) + 1 = x0 + 0.5x1 + 4.5 : [3, 6] + x10 range: [0.5, 6] + + x11.lb = 0.5x1 - 0.5 + x11.ub = 0.5x1 + 1.5 + x11 range: [0, 2] + + */ - Tightening( 12, 0.0240, Tightening::LB ), Tightening( 12, 0.8349, Tightening::UB ), - Tightening( 13, 0.5192, Tightening::LB ), Tightening( 13, 3.7629, Tightening::UB ), - } ); + List expectedBounds( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, 0.5, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 2, Tightening::UB ) + + } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_abs_all_positive() + { + Options::get()->setString( 
Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); @@ -4881,66 +6303,110 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - 
tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - // Initialize + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + x0: [4, 6] + x1: [1, 5] - Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), - Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ), - - Tightening( 9, 0.0184, Tightening::LB ), Tightening( 9, 0.8678, Tightening::UB ), - Tightening( 11, 0.1322, Tightening::LB ), Tightening( 11, 0.9816, Tightening::UB ), + Layer 1: - Tightening( 12, 0.0184, Tightening::LB ), Tightening( 12, 0.8678, Tightening::UB ), - Tightening( 13, 0.4151, Tightening::LB ), Tightening( 13, 3.8125, Tightening::UB ), + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Both absolute values positive, bound survive through activations: + + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub 
= 2x0 + 3x1 : [11, 27] + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ + + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - - void test_interval_arithmetic_bound_propagation_bilinear_constraints() + + void test_sbt_abs_positive_and_negative() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; - populateNetworkWithBilinear( nlr ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 12 ); + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); - // Initialize the bounds - tableau.setLowerBound( 0, -0.1 ); - tableau.setUpperBound( 0, 0.1 ); - tableau.setLowerBound( 1, -0.1 ); - tableau.setUpperBound( 1, 0.1 ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + 
nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; - double large = 1000; tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -4951,116 +6417,114 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - nlr.setTableau( &tableau ); + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); - // Initialize + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds( { - Tightening( 2, 0.9, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ), - Tightening( 3, -0.5, Tightening::LB ), Tightening( 3, 0.5, Tightening::UB ), - Tightening( 4, -0.3, Tightening::LB ), Tightening( 
4, 0.3, Tightening::UB ), - Tightening( 5, -0.3, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), - - Tightening( 6, -0.55, Tightening::LB ), Tightening( 6, 0.55, Tightening::UB ), - Tightening( 7, -0.09, Tightening::LB ), Tightening( 7, 0.09, Tightening::UB ), + x0: [4, 6] + x1: [1, 5] - Tightening( 8, 1.36, Tightening::LB ), Tightening( 8, 2.64, Tightening::UB ), - Tightening( 9, -0.64, Tightening::LB ), Tightening( 9, 0.64, Tightening::UB ), - - Tightening( 10, -1.6896, Tightening::LB ), Tightening( 10, 1.6896, Tightening::UB ), - - Tightening( 11, -1.6896, Tightening::LB ), Tightening( 11, 1.6896, Tightening::UB ), - } ); + Layer 1: - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] - // Change the current bounds - tableau.setLowerBound( 0, -0.3 ); - tableau.setUpperBound( 0, 0.1 ); - tableau.setLowerBound( 1, -0.1 ); - tableau.setUpperBound( 1, 0.2 ); + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); + First absolute value is negative, bounds get flipped + Second absolute value is positive, bounds surive the activation - // Initialize - 
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + x4.lb = -2x0 -3x1 + 30 : [3, 19] + x4.ub = -2x0 -3x1 + 30 : [3, 19] - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] - List expectedBounds2( { - Tightening( 2, 0.7, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ), - Tightening( 3, -0.8, Tightening::LB ), Tightening( 3, 0.9, Tightening::UB ), - Tightening( 4, -0.5, Tightening::LB ), Tightening( 4, 0.5, Tightening::UB ), - Tightening( 5, -0.6, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), - - Tightening( 6, -0.88, Tightening::LB ), Tightening( 6, 0.99, Tightening::UB ), - Tightening( 7, -0.3, Tightening::LB ), Tightening( 7, 0.3, Tightening::UB ), + Layer 2: - Tightening( 8, 0.82, Tightening::LB ), Tightening( 8, 3.29, Tightening::UB ), - Tightening( 9, -1.29, Tightening::LB ), Tightening( 9, 1.18, Tightening::UB ), - - Tightening( 10, -4.2441, Tightening::LB ), Tightening( 10, 3.8822, Tightening::UB ), - - Tightening( 11, -3.8822, Tightening::LB ), Tightening( 11, 4.2441, Tightening::UB ), + x6.lb = - 3x0 - 4x1 + 30 : [-8, 14] + x6.ub = - 3x0 - 4x1 + 30 : [-8, 14] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 19, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 14, Tightening::UB ), } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - void test_interval_arithmetic_bound_propagation_abs_and_relu_constraints() + void test_sbt_absolute_values_positive_and_not_fixed() { - 
NLR::NetworkLevelReasoner nlr; - populateNetworkWithAbsAndRelu( nlr ); + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; - double large = 1000; tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -5071,58 +6535,116 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, 
-large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - nlr.setTableau( &tableau ); + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); - // Initialize + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is undecided, bounds are concretized. 
+ Second absolute value is active, bounds surive the activation + + x4 range: [0, 12] + x4.lb = 0 + x4.ub = 12 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 + 12 : [ 1, 7] + + x6 range: [-11, 7] + */ List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, 7, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_absolute_values_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 
1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); - Tightening( 10, -5, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), - } ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); @@ -5134,66 +6656,114 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, 
-large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - // Initialize + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. + nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + x0: [4, 6] + x1: [1, 5] - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), + Layer 1: - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + x2 is eliminated, everything set to -3 - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] - Tightening( 10, -10, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + Second absolute value is positive, bounds surive the activation - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), 
Tightening( 13, 56, Tightening::UB ), + x4: all set to 3 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 + 3 : [-8, -2] + x6.ub = - x0 - x1 + 3 : [-8, -2] + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), } ); - + + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - void test_interval_arithmetic_bound_propagation_round_and_sign_constraints() + void test_sbt_signs_positive_and_not_fixed() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; - populateNetworkWithRoundAndSign( nlr ); - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); - // Initialize the bounds - tableau.setLowerBound( 0, 1.4 ); - tableau.setUpperBound( 0, 1.6 ); - tableau.setLowerBound( 1, -1.4 ); - tableau.setUpperBound( 1, 2.1 ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 
0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; - double large = 1000; tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -5204,128 +6774,120 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - nlr.setTableau( &tableau ); + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); - // Initialize + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds( { - Tightening( 2, 2.4, Tightening::LB ), 
Tightening( 2, 2.6, Tightening::UB ), - Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), - Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - - Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + x0: [4, 6] + x1: [1, 5] - Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), - Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), - - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + Layer 1: - Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), - } ); + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] - // Change the current bounds - tableau.setLowerBound( 0, -3.1 ); - tableau.setUpperBound( 0, 1.6 ); - tableau.setLowerBound( 1, 1.4 ); - tableau.setUpperBound( 1, 2.1 ); + First sign is undecided, bounds are concretized. + Second sign is active, bounds become constant 1 + Coefficient (first Sign, lower): 2/12 = 1/6. + Coefficient (first Sign, upper): -2/-4 = 1/2. 
- tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + x4 range: [-1, 1] + x4.lb = 1/6 ( 2x0 + 3x1 - 15 ) - 1 = 2/6 x0 + 3/6 x1 - 21/6 + x4.ub = 1/2 ( 2x0 + 3x1 - 15 ) + 1 = x0 + 1.5x1 - 6.5 - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + x5 range: [1, 1] + x5.lb = 1 + x5.ub = 1 + + Layer 2: - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + x6.lb = 1 ( 2/6 x0 + 3/6 x1 - 21/6 ) - 1 ( 1 ) = 2/6 x0 + 3/6 x1 - 27/6 : [-16/6, 0] + x6.ub = 1 ( x0 + 1.5x1 - 6.5 ) - 1 ( 1 ) = x0 + 1.5x1 - 7.5 : [-2, 6] - List expectedBounds2( { - Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), - Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), - Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), - Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + x6 range: [-8/3, 6] + */ - Tightening( 8, -16, Tightening::LB ), 
Tightening( 8, 1, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), - Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2.6667, Tightening::LB ), + Tightening( 6, 6, Tightening::UB ), } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - - void test_interval_arithmetic_bound_propagation_leaky_relu_and_sigmoid_constraints() + + void test_sbt_signs_active_and_externally_fixed() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; - populateNetworkWithLeakyReluAndSigmoid( nlr ); - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 
0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; - double large = 1000; tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -5336,354 +6898,704 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - nlr.setTableau( &tableau ); + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); - // Initialize + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
+ nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2 is eliminated, everything set to -3 + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First sign is negative, bounds become constant -1 + Second sign is positive, bounds become constant 1 + + x4: all set to -1 + + x5: all set to 1 + + Layer 2: + + x6.lb = 1 ( -1 ) - 1 ( 1 ) = -2 + x6.ub = 1 ( -1 ) - 1 ( 1 ) = -2 + */ List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), - Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), - Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + // x2 does not appear, because it has been eliminated - Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), - Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), - - Tightening( 9, 0.0573, Tightening::LB ), Tightening( 9, 0.9999, Tightening::UB ), - Tightening( 11, 0.0219, Tightening::LB ), Tightening( 11, 0.9999, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), - Tightening( 12, 0.0573, Tightening::LB ), Tightening( 12, 0.9999, Tightening::UB ), - Tightening( 13, 0.1229, Tightening::LB ), Tightening( 13, 3.9996, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, -1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + 
Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTLeakyReLU( nlr, tableau ); // alpha = 0.2 - // Change the current bounds - tableau.setLowerBound( 0, -3 ); + tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + tableau.setUpperBound( 1, 1 ); - // Initialize + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), 
Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + x0: [-1, 1] + x1: [-1, 1] - Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Layer 1: - Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), - Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] + + Both LeakyReLUs are undecided, bounds are concretized. + Coefficient: ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6 + Bias: ( 0.2 - 1 ) * 2 * -2 / ( 2--2 ) = 0.8 - Tightening( 9, 0.0323, Tightening::LB ), Tightening( 9, 0.9992, Tightening::UB ), - Tightening( 11, 0.0392, Tightening::LB ), Tightening( 11, 0.9993, Tightening::UB ), + x4.lb = x0 + x1 + x4.ub = 0.6 ( x0 + x1 ) + 0.8 = 0.6x0 + 0.6x1 + 0.8 + x4 range: [-0.4, 2] + + x5.lb = x0 - x1 + x5.ub = 0.6 ( x0 - x1 ) + 0.8 = 0.6x0 - 0.6x1 + 0.8 + x5 range: [-0.4, 2] + + Layer 2: - Tightening( 12, 0.0323, Tightening::LB ), Tightening( 12, 0.9992, Tightening::UB ), - Tightening( 13, 0.1498, Tightening::LB ), Tightening( 13, 3.9972, Tightening::UB ), - } ); + x6.lb = 1 ( x0 + x1 ) + 1 ( x0 - x1 ) = 2x0 : [-2, 2] + x6.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) + 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2x0 + 1.6 : [0.4, 2.8] + x6 range: [-2, 2.8] + + x7.lb = 1 ( x0 + x1 ) - 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] + x7.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) - 1 ( x0 - x1 ) = -0.4x0 + 1.6x1 + 0.8 : [-1.2, 2.8] + x7 range: [-2.8, 2.8] + + Both LeakyReLUs are undecided, bounds are concretized. 
+ Coefficient (first LeakyReLU): ( 2.8 - 0.2*-2 )/( 2.8--2 ) = 3.2/4.8 = 10/15 + Bias (first LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2 / ( 2.8--2 ) = 14/15 + + Coefficient (second LeakyReLU): ( 2.8 - 0.2*-2.8 )/( 2.8--2.8 ) = 3.36/5.6 = 0.6 + Bias (second LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2.8 / ( 2.8--2.8 ) = 1.12 + + x8.lb = 2x0 + x8.ub = 10/15 ( 1.2x0 + 1.6 ) + 14/15 = 0.8x0 + 2 + x8 range: [-0.4, 2.8] + + x9.lb = 0.4x0 + 1.6x1 - 0.8 + x9.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 + x9 range: [-0.56, 2.8] + + Layer 3: + + x10.lb = 1 ( 0.4x0 + 1.6x1 - 0.8 ) + 1 ( 2x0 ) + 1 = 2.4x0 + 1.6x1 + 0.2 : [-3.8, 5.2] + x10.ub = 1 ( -0.24 x0 + 0.96 x1 + 1.6 ) + 1 ( 0.8x0 + 2 ) + 1 = 0.56x0 + 0.96x1 + 4.6 : [3.08, 6.12] + x10 range: [-3.8, 6.12] + + x11.lb = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] + x11.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 : [0.4, 2.8] + x11 range: [-2.8, 2.8] + + */ + List expectedBounds( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.4, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -0.4, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2.8, Tightening::UB ), + Tightening( 7, -2.8, Tightening::LB ), Tightening( 7, 2.8, Tightening::UB ), + + Tightening( 8, -0.4, Tightening::LB ), Tightening( 8, 2.8, Tightening::UB ), + Tightening( 9, -0.56, Tightening::LB ), Tightening( 9, 2.8, Tightening::UB ), + + Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 6.12, Tightening::UB ), + Tightening( 11, -2.8, Tightening::LB ), Tightening( 11, 2.8, Tightening::UB ) + + } ); + + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - - void 
test_interval_arithmetic_bound_propagation_softmax_and_max_constraints() + + void test_sbt_sigmoids_and_round() { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithSoftmaxAndMax( nlr ); - + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 12 ); + nlr.setTableau( &tableau ); + populateNetworkSBTSigmoidsAndRound( nlr, tableau ); - // Initialize the bounds tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, -1 ); tableau.setUpperBound( 1, 1 ); - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + // Layer 1 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 0 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 0 ), 2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 1 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 1 ), 2, 0.00001 ) ); + // Layer 2 + TS_ASSERT( FloatUtils::areEqual( 
nlr.getLayer( 2 )->getLb( 0 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 0 ), 0.8807, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 1 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 1 ), 0.8807, 0.0001 ) ); + + // Layer 3 + /* + Double-check with Python + --- + from math import exp as e + def g(x): + return 1 / (1 + e(-x)) + + def g_prime(x): + return g(x) * (1 - g(x)) + + def lam(l, u): + return (g(u) - g(l)) / (u - l) + + def lam_prime(l, u): + return min(g_prime(l), g_prime(u)) + + l3 = l4 = -2 + u3 = u4 = 2 + l5 = l6 = g(-2) + u5 = u6 = g(2) + lambda7 = lam(l3, u3) + lambda7_prime = lam_prime(l3, u3) + lambda8 = lam(l4, u4) + lambda8_prime = lam_prime(l4, u4) + x7_l = lambda7_prime * (-2) + g(-2) + g(-2) - lambda7_prime * (-2 + -2) + x7_u = lambda7_prime * (2) + g(2) + g(2) -lambda7_prime * (2 + 2) + x8_l = lambda8_prime * (-2) + g(-2) - g(2) - lambda8_prime * (-2 - 2) + x8_u = lambda8_prime * (2) + g(2) - g(-2) -lambda8_prime * (2 - -2) + print(x7_l) + print(x7_u) + print(x8_l) + print(x8_u) + --- + [output]: + 0.4483930148512481 + 1.5516069851487517 + -0.5516069851487517 + 0.5516069851487517 + */ + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 0 ), 0.4483, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 0 ), 1.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 1 ), -0.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 1 ), 0.5516, 0.0001 ) ); + + // Layer 4 + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 0 ), 0 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 0 ), 2 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 1 ), -1 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 1 ), 1 ); + } + + void test_sbt_max_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; 
nlr.setTableau( &tableau ); + populateNetworkSBTMax( nlr, tableau ); - // Initialize + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 2] + + Layer 1: + + x2.lb = x0 + x1 : [-2, 3] + x2.ub = x0 + x1 : [-2, 3] + + x3.lb = x0 - x1 : [-3, 2] + x3.ub = x0 - x1 : [-3, 2] + + Both ReLUs are undecided, bounds are concretized. + Coefficient (first ReLU): 3/( 3--2 ) = 3/5 = 0.6 + Coefficient (second ReLU): 2/( 2--3 ) = 2/5 = 0.4 + + x4.lb = 0.6 ( x0 + x1 ) = 0.6x0 + 0.6x1 + x4.ub = 0.6 ( x0 + x1 ) + 1.2 = 0.6x0 + 0.6x1 + 1.2 + x4 range: [0, 3] + + x5.lb = 0.4 ( x0 - x1 ) = 0.4x0 + 0.4x1 + x5.ub = 0.4 ( x0 - x1 ) + 1.2 = 0.4x0 + 0.4x1 + 1.2 + x5 range: [0, 2] + + Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub + Max inherits lower bound from x4, and its upper bound is constant 3. 
+ + x6.lb = 0.6x0 + 0.6x1 : [-1.2, 1.8] + x6.ub = 3 : [3, 3] + x6 range: [-1.2, 3] + + Layer 3: + + x7.lb = 2 ( 0.6x0 + 0.6x1 ) = 1.2x0 + 1.8x1 : [-2.4, 3.6] + x7.ub = 2 ( 3 ) = 6 : [6, 6] + x7 range: [-2.4, 6] + */ List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), - Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), + Tightening( 2, -2, Tightening::LB ), + Tightening( 2, 3, Tightening::UB ), + Tightening( 3, -3, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), + Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -2.4, Tightening::LB ), + Tightening( 7, 6, Tightening::UB ), - Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), - Tightening( 9, 0.3192, Tightening::LB ), Tightening( 9, 2.9819, Tightening::UB ), - - Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), - - Tightening( 11, -2.9819, Tightening::LB ), Tightening( 11, -0.3192, Tightening::UB ) } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + void test_sbt_max_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau 
tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTMax( nlr, tableau ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -3 ); + tableau.setUpperBound( 1, -2 ); - // Initialize + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + x0: [1, 2] + x1: [-3, -2] + + Layer 1: + + x2.lb = x0 + x1 : [-2, 0] + x2.ub = x0 + x1 : [-2, 0] + + x3.lb = x0 - x1 : [3, 5] + x3.ub = x0 - x1 : [3, 5] + + First ReLU is negative, bounds become constant 0 + Second ReLU is positive, bounds survive the 
activation + + x4: all set to 0 + + x5.lb = x0 - x1 : [3, 5] + x5.ub = x0 - x1 : [3, 5] + + Max is fixed because x5.lb > x4.ub, it inherits x5's bounds + + x6.lb = x0 - x1 : [3, 5] + x6.ub = x0 - x1 : [3, 5] + + Layer 3: + + x7.lb = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] + x7.ub = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] + */ + + List expectedBounds( { + Tightening( 2, -2, Tightening::LB ), + Tightening( 2, 0, Tightening::UB ), + Tightening( 3, 3, Tightening::LB ), + Tightening( 3, 5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 3, Tightening::LB ), + Tightening( 5, 5, Tightening::UB ), + Tightening( 6, 3, Tightening::LB ), + Tightening( 6, 5, Tightening::UB ), + Tightening( 7, 6, Tightening::LB ), + Tightening( 7, 10, Tightening::UB ), - Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), - Tightening( 9, 0.0654, Tightening::LB ), Tightening( 9, 2.9933, Tightening::UB ), - - Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ), - - Tightening( 11, -2.9933, Tightening::LB ), Tightening( 11, -0.0654, Tightening::UB ) } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - - void test_interval_arithmetic_bound_propagation_relu_and_bilinear_constraints() + + void test_sbt_softmax1() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; - populateNetworkWithReluAndBilinear( nlr ); - MockTableau tableau; - tableau.getBoundManager().initialize( 12 ); + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); - // Initialize the bounds tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, -1 ); tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); - double 
large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + } + + void test_sbt_softmax2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + { + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + */ + List expectedBounds( { Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, 0.2595, Tightening::LB ), + Tightening( 6, 0.2595, Tightening::UB 
), + Tightening( 7, 0.7054, Tightening::LB ), + Tightening( 7, 0.7054, Tightening::UB ), + Tightening( 8, 0.0351, Tightening::LB ), + Tightening( 8, 0.0351, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), + Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), + Tightening( 10, -1, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + { + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "er" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + */ + List expectedBounds( { Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, 0.2595, Tightening::LB ), + Tightening( 6, 0.2595, Tightening::UB ), + Tightening( 7, 0.7054, Tightening::LB ), + Tightening( 7, 0.7054, Tightening::UB ), + Tightening( 8, 0.0351, Tightening::LB ), + Tightening( 8, 0.0351, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), + Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), + Tightening( 10, -1, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + } + void test_sbt_softmax3() + { + 
Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax2( nlr, tableau ); - // Initialize + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.00001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.00001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.00001 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + */ - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + List expectedBounds({ Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), + Tightening( 6, -1, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), + Tightening( 7, -2, Tightening::UB ), + Tightening( 8, 0.8668, Tightening::LB ), + Tightening( 8, 0.8668, Tightening::UB ), + Tightening( 9, 0.9820, Tightening::LB ), + Tightening( 9, 0.9820, Tightening::UB ), + Tightening( 10, 0.1173, 
Tightening::LB ), + Tightening( 10, 0.1173, Tightening::UB ), + Tightening( 11, 0.0179, Tightening::LB ), + Tightening( 11, 0.0179, Tightening::UB ), + Tightening( 12, 0.0159, Tightening::LB ), + Tightening( 12, 0.0159, Tightening::UB ), + Tightening( 13, 0.9470, Tightening::LB ), + Tightening( 13, 0.9470, Tightening::UB ), + Tightening( 14, -0.9470, Tightening::LB ), + Tightening( 14, -0.9470, Tightening::UB ), + Tightening( 15, 1.0253, Tightening::LB ), + Tightening( 15, 1.0253, Tightening::UB ), + Tightening( 16, -1.0253, Tightening::LB ), + Tightening( 16, -1.0253, Tightening::UB ) - Tightening( 10, -7, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), - - Tightening( 11, -49, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + void test_softmax_bounds_er() + { + Vector inputLb = { -1, 0, 1 }; + Vector inputUb = { 0, 2, 4 }; + Vector input = { -0.5, 1, 2.5 }; + + double value = NLR::DeepPolySoftmaxElement::ERLowerBound( input, inputLb, inputUb, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.0114799, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.00563867, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 1 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.000838421, 0.00001 ) ); + + + Vector outputLb = { 0.2, 0, 0 }; + Vector outputUb = { 0.4, 0.1, 0.1 }; + + value = NLR::DeepPolySoftmaxElement::ERUpperBound( input, outputLb, outputUb, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, -1.44538, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 0 ); + TS_ASSERT( 
FloatUtils::areEqual( value, 1.96538, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 1 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.358535, 0.00001 ) ); + } - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); + void test_softmax_bounds_lse1() + { + Vector inputLb = { -1, 0, 1 }; + Vector inputUb = { 0, 2, 3 }; + Vector input = { -0.5, 1, 2 }; + double value = NLR::DeepPolySoftmaxElement::LSELowerBound( input, inputLb, inputUb, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); + value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); + value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 1 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.00703444, 0.001 ) ); + + Vector outputLb = { 0.2, 0, 0 }; + Vector outputUb = { 0.4, 0.1, 0.1 }; + value = NLR::DeepPolySoftmaxElement::LSEUpperBound( input, outputLb, outputUb, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.164165, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.272204, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, 
outputLb, outputUb, 0, 1 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.073207, 0.00001 ) ); + } + + void test_sbt_bilinear() + { + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTBilinear( nlr, tableau ); - // Initialize + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -2 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + x0: [1, 2] + x1: [-2, 1] + + Layer 1: - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, -2, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + x2.lb = x0 - 2x1 : [-1, 6] + x2.ub = x0 - 2x1 : [-1, 6] - Tightening( 10, -14, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), - - Tightening( 11, -49, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), - } ); + x3.lb = x0 + x1 : [-1, 3] + x3.ub = x0 + x1 : [-1, 3] - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + Coefficients for bilinear layer: + Lower bound: + alpha_l = x3.lb = -1 + beta = x2.lb = -1 + gamma_l = -x2.lb * x3.lb = --1 * -1 = -1 + + Upper bound: + alpha_u = x3.ub = 3 + beta = x2.lb = -1 + gamma_u = -x2.lb * x3.ub = --1 * 3 = 3 - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + x4.lb = -1 ( x0 
- 2x1 ) + -1 ( x0 + x1 ) + -1 = -2x0 + x1 - 1 : [-7, -2] + x4.ub = 3 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + 3 = 2x0 - 7x1 + 3 : [0, 21] + x4 range: [-7, 21] + + Layer 3: + + x7.lb = -1 ( 2x0 - 5x1 + 3 ) = -2x0 + 7x1 - 3 : [-21, 0] + x7.ub = -1 ( -2x0 + 3x1 - 1 ) = 2x0 + x1 + 1 : [2, 7] + x4 range: [-21, 5] + */ + + List expectedBounds( { Tightening( 2, -1, Tightening::LB ), + Tightening( 2, 6, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 3, Tightening::UB ), + Tightening( 4, -7, Tightening::LB ), + Tightening( 4, 21, Tightening::UB ), + Tightening( 5, -21, Tightening::LB ), + Tightening( 5, 7, Tightening::UB ) } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } void test_concretize_input_assignment() From 32d7c527b47cc05bed8702d37b3d1981a1e7773e Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Tue, 1 Oct 2024 23:20:47 +0300 Subject: [PATCH 10/72] 0-5-0: Adding rounding constant for LP relaxation propagation. 0-5-0: Adding rounding constant for LP relaxation propagation. 
--- src/configuration/GlobalConfiguration.cpp | 1 + src/configuration/GlobalConfiguration.h | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp index c70ce013ef..d8529ee7c1 100644 --- a/src/configuration/GlobalConfiguration.cpp +++ b/src/configuration/GlobalConfiguration.cpp @@ -71,6 +71,7 @@ const unsigned GlobalConfiguration::SIMULATION_RANDOM_SEED = 1; const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true; const double GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT = 0.00000000001; +const double GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT = 0.00000001; const double GlobalConfiguration::SIGMOID_CUTOFF_CONSTANT = 20; diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h index c89c1d54b3..ca06a28735 100644 --- a/src/configuration/GlobalConfiguration.h +++ b/src/configuration/GlobalConfiguration.h @@ -195,8 +195,9 @@ class GlobalConfiguration Symbolic bound tightening options */ - // Symbolic tightening rounding constant + // Symbolic tightening, LP rounding constants static const double SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT; + static const double LP_TIGHTENING_ROUNDING_CONSTANT; static const double SIGMOID_CUTOFF_CONSTANT; From 5a7c5d8d63756bd8dd55b5ab57642448043bee49 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Tue, 1 Oct 2024 23:24:17 +0300 Subject: [PATCH 11/72] 0-5-1: Implementing forward-backward LP propagation algorithm for all activation functions. 0-5-1: Implementing forward-backward LP propagation algorithm for all activation functions. 
--- src/nlr/LPFormulator.cpp | 550 ++++++++++++++++++++++++++++++++++++++- src/nlr/LPFormulator.h | 17 +- 2 files changed, 560 insertions(+), 7 deletions(-) diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp index 552fa64617..9341c7188e 100644 --- a/src/nlr/LPFormulator.cpp +++ b/src/nlr/LPFormulator.cpp @@ -21,6 +21,7 @@ #include "MStringf.h" #include "NLRError.h" #include "Options.h" +#include "DeepPolySoftmaxElement.h" #include "TimeUtils.h" #include "Vector.h" @@ -322,7 +323,14 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( const Map TimeUtils::timePassed( gurobiStart, gurobiEnd ) / 1000000 ) .ascii() ); + // Clean up clearSolverQueue( freeSolvers ); + + if ( threads ) + { + delete[] threads; + threads = NULL; + } if ( infeasible ) throw InfeasibleQueryException(); @@ -492,7 +500,16 @@ void LPFormulator::optimizeBoundsOfNeuronsWithLpRlaxation( ThreadArgument &args, threads[i].interrupt(); threads[i].join(); } + + // Clean up clearSolverQueue( freeSolvers ); + + if ( threads ) + { + delete[] threads; + threads = NULL; + } + throw InfeasibleQueryException(); } @@ -569,7 +586,8 @@ void LPFormulator::tightenSingleVariableBoundsWithLPRelaxation( ThreadArgument & { LPFormulator_LOG( Stringf( "Computing upperbound..." ).ascii() ); double ub = optimizeWithGurobi( - *gurobi, MinOrMax::MAX, variableName, cutoffValue, &infeasible ); + *gurobi, MinOrMax::MAX, variableName, cutoffValue, &infeasible ) + + GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT;; LPFormulator_LOG( Stringf( "Upperbound computed %f", ub ).ascii() ); // Store the new bound if it is tighter @@ -599,7 +617,8 @@ void LPFormulator::tightenSingleVariableBoundsWithLPRelaxation( ThreadArgument & LPFormulator_LOG( Stringf( "Computing lowerbound..." 
).ascii() ); gurobi->reset(); double lb = optimizeWithGurobi( - *gurobi, MinOrMax::MIN, variableName, cutoffValue, &infeasible ); + *gurobi, MinOrMax::MIN, variableName, cutoffValue, &infeasible ) - + GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT; LPFormulator_LOG( Stringf( "Lowerbound computed: %f", lb ).ascii() ); // Store the new bound if it is tighter if ( lb > currentLb ) @@ -670,7 +689,6 @@ void LPFormulator::createLPRelaxationAfter( const Map &layers } } - void LPFormulator::addLayerToModel( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -688,10 +706,18 @@ void LPFormulator::addLayerToModel( GurobiWrapper &gurobi, case Layer::WEIGHTED_SUM: addWeightedSumLayerToLpRelaxation( gurobi, layer, createVariables ); break; + + case Layer::ROUND: + addRoundLayerToLpRelaxation( gurobi, layer, createVariables ); + break; case Layer::LEAKY_RELU: addLeakyReluLayerToLpRelaxation( gurobi, layer, createVariables ); break; + + case Layer::ABSOLUTE_VALUE: + addAbsoluteValueLayerToLpRelaxation( gurobi, layer, createVariables ); + break; case Layer::SIGN: addSignLayerToLpRelaxation( gurobi, layer, createVariables ); @@ -700,6 +726,18 @@ void LPFormulator::addLayerToModel( GurobiWrapper &gurobi, case Layer::MAX: addMaxLayerToLpRelaxation( gurobi, layer, createVariables ); break; + + case Layer::SIGMOID: + addSigmoidLayerToLpRelaxation( gurobi, layer, createVariables ); + break; + + case Layer::SOFTMAX: + addSoftmaxLayerToLpRelaxation( gurobi, layer, createVariables ); + break; + + case Layer::BILINEAR: + addBilinearLayerToLpRelaxation( gurobi, layer, createVariables ); + break; default: throw NLRError( NLRError::LAYER_TYPE_NOT_SUPPORTED, "LPFormulator" ); @@ -807,6 +845,244 @@ void LPFormulator::addReluLayerToLpRelaxation( GurobiWrapper &gurobi, } } + +void LPFormulator::addRoundLayerToLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( 
!layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = FloatUtils::round(sourceValue); + + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + double ub = std::max( FloatUtils::round( sourceUb ), layer->getUb( i ) ); + double lb = std::min( FloatUtils::round( sourceLb ), layer->getLb( i ) ); + + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); + + // If u = l: y = round(u) + if ( FloatUtils::areEqual( sourceUb, sourceLb ) ) + { + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addEqConstraint( terms, ub ); + } + + else + { + List terms; + // y <= x + 0.5, i.e. y - x <= 0.5 + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, 0.5 ); + + // y >= x - 0.5, i.e. 
y - x >= -0.5 + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, -0.5 ); + } + } + } +} + + +void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = sourceValue > 0 ? sourceValue : -sourceValue; + + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + double ub = std::min( FloatUtils::max( -sourceLb, sourceUb ), layer->getUb( i ) ); + + gurobi.addVariable(Stringf( "x%u", targetVariable ), 0, ub ); + + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // The AbsoluteValue is active, y = x + if ( sourceLb < 0 ) + sourceLb = 0; + + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addEqConstraint( terms, 0 ); + } + else if ( 
!FloatUtils::isPositive( sourceUb ) ) + { + // The AbsoluteValue is inactive, y = -x, i.e. y + x = 0 + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addEqConstraint( terms, 0 ); + } + else + { + /* + The phase of this AbsoluteValue is not yet fixed, 0 <= y <= max(-lb, ub). + */ + + // y >= 0 + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + // y <= max(-lb, ub) + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addLeqConstraint( terms, ub ); + } + } + } +} + + +void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = SigmoidConstraint::sigmoid(sourceValue); + + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + double ub = std::min( SigmoidConstraint::sigmoid( 
sourceUb ), layer->getUb( i ) ); + double lb = std::max( SigmoidConstraint::sigmoid( sourceLb ), layer->getLb( i ) ); + + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); + + // If u = l: y = sigmoid(u) + if ( FloatUtils::areEqual( sourceUb, sourceLb ) ) + { + List terms; + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addEqConstraint( terms, ub ); + } + + else + { + List terms; + double lambda = ( ub - lb ) / ( sourceUb - sourceLb ); + double lambdaPrime = std::min( SigmoidConstraint::sigmoidDerivative( sourceLb ), + SigmoidConstraint::sigmoidDerivative( sourceUb ) ); + + // update lower bound + if ( FloatUtils::isPositive( sourceLb ) ) + { + // y >= lambda * (x - l), i.e. y - lambda * x >= lambda * l + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -lambda, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, sourceLb * lambda ); + } + + else + { + // y >= lambda' * (x - l), i.e. y - lambda' * x >= lambda' * l + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -lambdaPrime, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, sourceLb * lambdaPrime ); + } + + // update upper bound + if ( !FloatUtils::isPositive( sourceUb ) ) + { + // y <= lambda * (x - u), i.e. y - lambda * x <= lambda * u + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -lambda, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, sourceUb * lambda ); + } + else + { + // y <= lambda' * (x - u), i.e. 
y - lambda' * x <= lambda' * u + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -lambdaPrime, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, sourceUb * lambdaPrime ); + } + } + } + } +} + + + void LPFormulator::addSignLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -890,6 +1166,7 @@ void LPFormulator::addSignLayerToLpRelaxation( GurobiWrapper &gurobi, } } + void LPFormulator::addMaxLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -971,6 +1248,267 @@ void LPFormulator::addMaxLayerToLpRelaxation( GurobiWrapper &gurobi, } } + +void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( layer->neuronEliminated( i ) ) + continue; + + Set handledInputNeurons; + List sources = layer->getActivationSources( i ); + + Vector sourceLbs; + Vector sourceUbs; + Vector sourceMids; + Vector targetLbs; + Vector targetUbs; + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + unsigned sourceNeuron = source._neuron; + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceMids.append( ( sourceLb + sourceUb ) / 2 ); + targetLbs.append( layer->getLb( i ) ); + targetUbs.append( layer->getUb( i ) ); + } + + // Find the index of i in 
the softmax + unsigned index = 0; + for ( const auto &source : sources ) + { + if ( handledInputNeurons.exists( source._neuron ) ) + ++index; + else + { + handledInputNeurons.insert( source._neuron ); + break; + } + } + + double ub = std::min( DeepPolySoftmaxElement::linearUpperBound( sourceLbs, sourceUbs, index ), layer->getUb( i ) ); + double lb = std::max( DeepPolySoftmaxElement::linearLowerBound( sourceLbs, sourceUbs, index ), layer->getLb( i ) ); + + unsigned targetVariable = layer->neuronToVariable( i ); + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); + + double bias; + SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType(); + + + List terms; + if ( FloatUtils::areEqual( lb, ub ) ) + { + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addEqConstraint( terms, ub ); + } + else + { + // Compute symbolic bound + if ( boundType == SoftmaxBoundType::LOG_SUM_EXP_DECOMPOSITION ) + { + bool useLSE2 = false; + for ( const auto &lb : targetLbs ) + { + if ( lb > GlobalConfiguration::SOFTMAX_LSE2_THRESHOLD ) + useLSE2 = true; + } + unsigned inputIndex = 0; + if ( !useLSE2 ) + { + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + bias = DeepPolySoftmaxElement::LSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + unsigned sourceNeuron = source._neuron; + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double dldj = + DeepPolySoftmaxElement::dLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); + bias -= dldj * sourceMids[inputIndex]; + ++inputIndex; + } + gurobi.addGeqConstraint( terms, bias ); + } + else + { + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + 
bias = DeepPolySoftmaxElement::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + unsigned sourceNeuron = source._neuron; + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double dldj = + DeepPolySoftmaxElement::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); + bias -= dldj * sourceMids[inputIndex]; + ++inputIndex; + } + gurobi.addGeqConstraint( terms, bias ); + } + + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + bias = DeepPolySoftmaxElement::LSEUpperBound( sourceMids, targetLbs, targetUbs, index ); + inputIndex = 0; + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + unsigned sourceNeuron = source._neuron; + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double dudj = + DeepPolySoftmaxElement::dLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) ); + bias -= dudj * sourceMids[inputIndex]; + ++inputIndex; + } + gurobi.addLeqConstraint( terms, bias ); + } + else if ( boundType == SoftmaxBoundType::EXPONENTIAL_RECIPROCAL_DECOMPOSITION ) + { + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + bias = DeepPolySoftmaxElement::ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); + unsigned inputIndex = 0; + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + unsigned sourceNeuron = source._neuron; + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double dldj = + DeepPolySoftmaxElement::dERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + 
terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); + bias -= dldj * sourceMids[inputIndex]; + ++inputIndex; + } + gurobi.addGeqConstraint( terms, bias ); + + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + bias = DeepPolySoftmaxElement::ERUpperBound( sourceMids, targetLbs, targetUbs, index ); + inputIndex = 0; + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + unsigned sourceNeuron = source._neuron; + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double dudj = + DeepPolySoftmaxElement::dERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) ); + bias -= dudj * sourceMids[inputIndex]; + ++inputIndex; + } + gurobi.addLeqConstraint( terms, bias ); + } + } + } +} + +void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, + bool createVariables ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + + Vector sourceLbs; + Vector sourceUbs; + Vector sourceValues; + Vector sourceNeurons; + unsigned counter = 0; + bool allConstant = true; + for ( const auto &sourceIndex : sources ) + { + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) ); + + sourceNeurons.append( sourceNeuron ); + sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + + 
if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + ++counter; + + if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) + { + allConstant = false; + } + else + { + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + sourceValues.append( sourceValue ); + } + ++counter; + } + + if ( allConstant ) + { + // If the both source neurons have been eliminated, this neuron is constant + double targetValue = sourceValues[0] * sourceValues[1]; + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + continue; + } + + double lb = FloatUtils::infinity(); + double ub = FloatUtils::negativeInfinity(); + List values = { sourceLbs[0] * sourceLbs[1], + sourceLbs[0] * sourceUbs[1], + sourceUbs[0] * sourceLbs[1], + sourceUbs[0] * sourceUbs[1] }; + for ( const auto &v : values ) + { + if ( v < lb ) + lb = v; + if ( v > ub ) + ub = v; + } + + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); + + // Lower bound: out >= l_y * x + l_x * y - l_x * l_y + List terms; + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -sourceLbs[1], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + terms.append( GurobiWrapper::Term( -sourceLbs[0], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + gurobi.addGeqConstraint( terms, -sourceLbs[0] * sourceLbs[1] ); + + // Upper bound: out <= u_y * x + l_x * y - l_x * u_y + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -sourceUbs[1], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + terms.append( GurobiWrapper::Term( -sourceLbs[0], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + gurobi.addLeqConstraint( terms, -sourceLbs[0] * sourceUbs[1] ); + } + } +} + + void 
LPFormulator::addWeightedSumLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -1074,7 +1612,7 @@ void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi, if ( !FloatUtils::isNegative( sourceLb ) ) { - // The ReLU is active, y = x + // The LeakyReLU is active, y = x List terms; terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); @@ -1083,7 +1621,7 @@ void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi, } else if ( !FloatUtils::isPositive( sourceUb ) ) { - // The ReLU is inactive, y = alpha * x + // The LeakyReLU is inactive, y = alpha * x List terms; terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -slope, Stringf( "x%u", sourceVariable ) ) ); @@ -1096,7 +1634,7 @@ void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi, double bias = ( ( slope - 1 ) * sourceUb * sourceLb ) / width; /* - The phase of this ReLU is not yet fixed. + The phase of this LeakyReLU is not yet fixed. For y = LeakyReLU(x), we add the following triangular relaxation: 1. y >= alpha * x 2. 
y >= x diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index 9a12a9e617..135854bf6b 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -103,7 +103,22 @@ class LPFormulator : public ParallelSolver void addMaxLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); - + + void + addRoundLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); + + void + addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); + + void + addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); + + void + addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); + + void + addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); + void addWeightedSumLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); From 79f7a845e05ba789bf854deca01069422fe42bd4 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Tue, 1 Oct 2024 23:25:47 +0300 Subject: [PATCH 12/72] 0-5-2: Adding forward-backward LP propagation tests for all activation functions. 0-5-2: Adding forward-backward LP propagation tests for all activation functions. --- src/nlr/tests/Test_NetworkLevelReasoner.h | 13076 +++++++++++++------- 1 file changed, 8774 insertions(+), 4302 deletions(-) diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index 42de1e97fa..bc4d620609 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -2,7 +2,7 @@ /*! \file Test_NetworkLevelReasoner.h ** \verbatim ** Top contributors (to current version): - ** Guy Katz, Andrew Wu, Ido Shmuel + ** Guy Katz, Andrew Wu ** This file is part of the Marabou project. 
** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS ** in the top-level source directory) and their institutional affiliations. @@ -1932,892 +1932,1964 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 5, -large ); tableau.setUpperBound( 5, large ); } - - void test_evaluate_relu() + + void populateNetworkBackwardReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; + /* - populateNetwork( nlr ); + 1 R -1 R -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ R / \ R / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 - double input[2]; - double output[2]; + The example described in Fig. 2 of + https://dl.acm.org/doi/10.1145/3563325 + using ReLU activation instead of LeakyReLU + */ - // With ReLUs, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); - // With ReLUs, case 1 - input[0] = 1; - input[1] = 1; + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); - 
TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 1 ) ); + nlr.setBias( 5, 1, 2 ); - // With ReLUs, case 2 - input[0] = 1; - input[1] = 2; + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 0 ) ); - } + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - void test_evaluate_sigmoids() - { - NLR::NetworkLevelReasoner nlr; + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - populateNetworkWithSigmoids( nlr ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - double input[2]; - double output[2]; + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - // case 1 - input[0] = 0; - input[1] = 0; + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.6750, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 3.0167, 0.0001 ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - // case 2 - input[0] = 1; - input[1] = 1; + tableau.getBoundManager().initialize( 12 ); + 
tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardReLU2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = ReLU( x3 ) + x8 = ReLU( x4 ) + x9 = ReLU( x5 ) + x10 = ReLU( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = ReLU( x11 ) + x15 = ReLU( x12 ) + x16 = ReLU( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = ReLU( x17 ) + x20 = ReLU( x18 ) + + x21 = x19 - x20 - 1 + */ - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::RELU, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::RELU, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.6032, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.5790, 0.0001 ) ); + // Mark layer dependencies + for ( unsigned 
i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); - // case 3 - input[0] = 1; - input[1] = 2; + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.5045, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.1957, 0.0001 ) ); - } - - void test_evaluate_abs() - { - NLR::NetworkLevelReasoner nlr; + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); - populateNetworkWithAbs( nlr ); + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); - double input[2]; - double output[2]; + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); - // With Abs, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); - 
TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - // With Abs, case 1 - input[0] = -2; - input[1] = -2; + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - // With Abs, case 2 - input[0] = 1; - input[1] = 2; + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 10 ) ); + tableau.getBoundManager().initialize( 22 
); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); } - void test_evaluate_sign() + void populateNetworkBackwardSigmoid( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; + /* - populateNetworkWithSign( nlr ); + 1 S -1 S -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ S / \ S / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 - double input[2]; - double output[2]; + The example described in Fig. 
2 of + https://dl.acm.org/doi/10.1145/3563325 + using Sigmoid activation instead of LeakyReLU + */ - // With Sign, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); - // With Sign, case 1 - input[0] = -2; - input[1] = -2; + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + nlr.setBias( 5, 1, 2 ); - // With Sign, case 2 (0 considered "non-negative", sign(0)=1) - input[0] = -1; - input[1] = 1; + // Mark the Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], -1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -4 ) ); - } - - void test_evaluate_round() - { - 
NLR::NetworkLevelReasoner nlr; + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - populateNetworkWithRound( nlr ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - double input[2]; - double output[2]; + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - // With Round, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - // With Round, case 1 - input[0] = 2.1; - input[1] = 1.4; + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 
11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardSigmoid2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Sigmoid( x3 ) + x8 = Sigmoid( x4 ) + x9 = Sigmoid( x5 ) + x10 = Sigmoid( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Sigmoid( x11 ) + x15 = Sigmoid( x12 ) + x16 = Sigmoid( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Sigmoid( x17 ) + x20 = Sigmoid( x18 ) + + x21 = x19 - x20 - 1 + */ - TS_ASSERT( FloatUtils::areEqual( output[0], 2, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::SIGMOID, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - // With Round, case 2 - input[0] = 2.1; - input[1] = 1.6; + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -12, 0.0001 ) ); - } - - void 
test_evaluate_leaky_relu() - { - NLR::NetworkLevelReasoner nlr; + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); - populateNetworkWithLeakyRelu( nlr ); + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); - double input[2]; - double output[2]; + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); - // With Leaky ReLU, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + // Mark the Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - // With Leaky ReLU, case 1 (alpha=0.1) - input[0] = 1; - input[1] = 1; + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( 
input, output ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.9, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 0.57, 0.0001 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - // With Leaky ReLU, case 2 - input[0] = 1; - input[1] = 2; + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - TS_ASSERT( FloatUtils::areEqual( output[0], -0.04, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -0.76, 0.0001 ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + 
tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); } - void test_evaluate_max() + void populateNetworkBackwardSign( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithMax( nlr ); + /* - double input[2]; - double output[1]; + 1 Sign -1 Sign -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ Sign / \ Sign / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 - // With Max, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + The example described in Fig. 
2 of + https://dl.acm.org/doi/10.1145/3563325 + using Sign activation instead of LeakyReLU + */ - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - TS_ASSERT( FloatUtils::areEqual( output[0], -3 ) ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); - // With Max, case 1 - input[0] = 1; - input[1] = -3; + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], -18 ) ); + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); - // With Max, case 2 - input[0] = -3; - input[1] = 3; + nlr.setBias( 5, 1, 2 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], -5 ) ); - } - - - void test_evaluate_softmax() - { - NLR::NetworkLevelReasoner nlr; + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); - populateNetworkWithSoftmax( nlr ); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - double input[2]; - double output[2]; + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - // With Softmax, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.2999, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.4001, 0.0001 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - // With Softmax, case 1 - input[0] = 1; - input[1] = -3; + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.1192, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.7615, 0.0001 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - // With Softmax, case 2 - input[0] = -3; - input[1] = 3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.1206, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.7588, 0.0001 ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + 
tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); } - void test_evaluate_bilinear() + void populateNetworkBackwardSign2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Sign( x3 ) + x8 = Sign( x4 ) + x9 = SIgn( x5 ) + x10 = Sign( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Sign( x11 ) + x15 = Sign( x12 ) + x16 = Sign( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Sign( x17 ) + x20 = Sign( x18 ) + + x21 = x19 - x20 - 1 + */ - populateNetworkWithBilinear( nlr ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - double input[2]; - double output[1]; + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); - // With Bilinear, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + 
nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); - // With Bilinear, case 1 - input[0] = 0.1; - input[1] = -0.3; + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 2.8304, 0.0001 ) ); + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); - // With Bilinear, case 2 - input[0] = -0.3; - input[1] = 0.3; + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.0912, 0.0001 ) ); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( 
NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, 
-large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); } - void test_evaluate_non_consecutive_layers() + void populateNetworkBackwardRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; + /* + + 1 Rnd -1 Rnd -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ Rnd / \ Rnd / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 2 of + https://dl.acm.org/doi/10.1145/3563325 + using Round activation instead of LeakyReLU + */ // Create the layers nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 2 ); nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); // Mark layer dependencies - nlr.addLayerDependency( 0, 1 ); - nlr.addLayerDependency( 1, 2 ); - nlr.addLayerDependency( 2, 3 ); - nlr.addLayerDependency( 0, 3 ); - nlr.addLayerDependency( 3, 4 ); - nlr.addLayerDependency( 0, 4 ); - nlr.addLayerDependency( 4, 5 ); + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); - // Set the weights and relus + // Set the weights and biases for the weighted sum layers nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); 
+ nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + nlr.setBias( 5, 1, 2 ); + + // Mark the Round sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, 2 ); - nlr.setWeight( 2, 2, 3, 1, -2 ); - nlr.setWeight( 0, 1, 3, 1, 1 ); nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 0, 0, 4, 2 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 2, 5, 0, 1 ); - - // Evaluate - double input[2]; - double output; - input[0] = 1; - input[1] = 1; + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); - TS_ASSERT( FloatUtils::areEqual( output, 2 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - input[0] = -1; - input[1] = 2; + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); - TS_ASSERT( FloatUtils::areEqual( output, 0 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - TS_ASSERT_THROWS_NOTHING( nlr.computeSuccessorLayers() ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - TS_ASSERT_EQUALS( nlr.getLayer( 0 )->getSuccessorLayers(), Set( { 1, 3, 4 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 1 )->getSuccessorLayers(), Set( { 2 } ) ); - 
TS_ASSERT_EQUALS( nlr.getLayer( 2 )->getSuccessorLayers(), Set( { 3 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 3 )->getSuccessorLayers(), Set( { 4 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getSuccessorLayers(), Set( { 5 } ) ); - } + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - void test_evaluate_abs_and_relu() - { - NLR::NetworkLevelReasoner nlr; + // Very loose bounds for neurons except inputs + double large = 1000000; - populateNetworkWithAbsAndRelu( nlr ); - - double input[2]; - double output[2]; - - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); - - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); } - void test_evaluate_round_and_sign() + void populateNetworkBackwardRound2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithRoundAndSign( nlr ); - 
- double input[2]; - double output[2]; - - input[0] = 1.6; - input[1] = 1.4; + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Round( x3 ) + x8 = Round( x4 ) + x9 = Round( x5 ) + x10 = Round( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Round( x11 ) + x15 = Round( x12 ) + x16 = Round( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Round( x17 ) + x20 = Round( x18 ) + + x21 = x19 - x20 - 1 + */ - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 1, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -2, 0.0001 ) ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); - input[0] = 1.6; - input[1] = 1.6; + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + 
nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], -1, 0.0001) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); - } - - void test_evaluate_leaky_relu_and_sigmoid() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithLeakyReluAndSigmoid( nlr ); + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); - double input[2]; - double output[2]; - - input[0] = 1; - input[1] = 1; + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.7109, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 1.4602, 0.0001 ) ); + // Mark the Round sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); - input[0] = 1; - input[1] = 2; + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.4013, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 0.6508, 0.0001 ) ); - } - - void test_evaluate_softmax_and_max() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSoftmaxAndMax( nlr ); - - double input[2]; - double output[1]; + nlr.setNeuronVariable( 
NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - input[0] = 1; - input[1] = -3; + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - TS_ASSERT( FloatUtils::areEqual( output[0], -2.9998, 0.0001 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - input[0] = -3; - input[1] = 3; + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT( FloatUtils::areEqual( output[0], -1.0000, 0.0001 ) ); + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + 
tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); } - void test_evaluate_relu_and_bilinear() + void populateNetworkBackwardAbs( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithReluAndBilinear( nlr ); - - double input[2]; - double output[1]; + /* - input[0] = 1; - input[1] = 1; + 1 A -1 A -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ A / \ A / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + The example described in Fig. 
2 of + https://dl.acm.org/doi/10.1145/3563325 + using Absolute value activation instead of LeakyReLU + */ - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - input[0] = 1; - input[1] = 2; + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); - } + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); - void test_store_into_other() - { - NLR::NetworkLevelReasoner nlr; + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); - populateNetwork( nlr ); + nlr.setBias( 5, 1, 2 ); - NLR::NetworkLevelReasoner nlr2; + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); - double input[2]; - double output1[2]; - double output2[2]; + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - // Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - 
TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - // With ReLUs, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // With ReLUs, case 1 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + 
tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); } - - void test_store_into_other_with_sigmoids() + + void populateNetworkBackwardAbs2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSigmoids( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Abs( x3 ) + x8 = Abs( x4 ) + x9 = Abs( x5 ) + x10 = Abs( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Abs( x11 ) + x15 = Abs( x12 ) + x16 = Abs( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Abs( x17 ) + x20 = Abs( x18 ) + + x21 = x19 - x20 - 1 + */ - // case 1 - input[0] = 0; - input[1] = 0; + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] 
) ); + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); - // case 2 - input[0] = 1; - input[1] = 1; + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_round() - { - NLR::NetworkLevelReasoner nlr; + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); - populateNetworkWithRound( nlr ); + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); - NLR::NetworkLevelReasoner nlr2; + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + // Variable 
indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - double input[2]; - double output1[2]; - double output2[2]; + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - // case 1 - input[0] = 0; - input[1] = 0; + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - // case 2 - input[0] = 1; - input[1] = 1; + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + 
tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); } - void test_store_into_other_with_sign() + void populateNetworkBackwardLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; + /* - populateNetworkWithSign( nlr ); + 1 LR -1 LR -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ LR / \ LR / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 - NLR::NetworkLevelReasoner nlr2; + The example described in Fig. 
2 of + https://dl.acm.org/doi/10.1145/3563325 + */ - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); - double input[2]; - double output1[2]; - double output2[2]; + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); - // case 1 - input[0] = 0; - input[1] = 0; + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); - // case 2 - input[0] = 1; - input[1] = 1; + nlr.setBias( 5, 1, 2 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_abs() - { - NLR::NetworkLevelReasoner nlr; + nlr.addActivationSource( 3, 0, 4, 0 ); + 
nlr.addActivationSource( 3, 1, 4, 1 ); - populateNetworkWithAbs( nlr ); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - NLR::NetworkLevelReasoner nlr2; + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - double input[2]; - double output1[2]; - double output2[2]; + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - // case 1 - input[0] = 0; - input[1] = 0; + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 
6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); } - void test_store_into_other_with_leaky_relu() + void populateNetworkBackwardLeakyRelu2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithLeakyRelu( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = LeakyReLU( x3 ) + x8 = LeakyReLU( x4 ) + x9 = LeakyReLU( x5 ) + x10 = LeakyReLU( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = LeakyReLU( x11 ) + x15 = LeakyReLU( x12 ) + x16 = LeakyReLU( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = LeakyReLU( x17 ) + x20 = LeakyReLU( x18 ) + + x21 = x19 - x20 - 1 + */ - // case 1 - input[0] = 0; - input[1] = 0; + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); + nlr.getLayer( 6 )->setAlpha( 0.1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( 
nlr2.evaluate( input, output2 ) ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); - // case 2 - input[0] = 1; - input[1] = 1; + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_max() - { - NLR::NetworkLevelReasoner nlr; + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); - populateNetworkWithMax( nlr ); + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); - NLR::NetworkLevelReasoner nlr2; + 
nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - double input[2]; - double output1[2]; - double output2[2]; + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - // case 1 - input[0] = 0; - input[1] = 0; + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - // case 2 - input[0] = 1; - input[1] = 1; + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - 
TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); } - void test_store_into_other_with_softmax() + void populateNetworkBackwardSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSoftmax( nlr ); - - 
NLR::NetworkLevelReasoner nlr2; + /* + a a' + x e + b b' f h + y d + c c' + */ - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::MAX, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - double input[2]; - double output1[2]; - double output2[2]; + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); - // case 1 - input[0] = 0; - input[1] = 0; + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + nlr.setWeight( 4, 0, 5, 0, -1 ); - // case 2 - input[0] = 1; - input[1] = 1; + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + // Mark the Softmax/Max sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); - TS_ASSERT( FloatUtils::areEqual( 
output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_bilinear() - { - NLR::NetworkLevelReasoner nlr; + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); - populateNetworkWithBilinear( nlr ); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - NLR::NetworkLevelReasoner nlr2; + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - double input[2]; - double output1[2]; - double output2[2]; + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - // case 1 - input[0] = 0; - input[1] = 0; + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + 
tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); } - void test_generate_input_query() + + void populateNetworkBackwardSoftmaxAndMax2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; + /* + x3 x7 + x0 x11 x14 + x4 x8 + x1 x12 x15 x17 + x5 x9 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7, x8, x9, x10 = Softmax( x2, x3, x4, x5 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14, x15, x16 = Softmax( x11, x12, x13 ) + + x17 = Max( x14, x15, x16 ) + */ // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 5, NLR::Layer::MAX, 1 ); // Mark layer dependencies for ( unsigned i = 1; i <= 5; ++i ) nlr.addLayerDependency( i - 1, i ); - // Variable indexing + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 
0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + + // Mark the Softmax/Max sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 3, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 2 ); + nlr.addActivationSource( 1, 0, 2, 3 ); + nlr.addActivationSource( 1, 1, 2, 3 ); + nlr.addActivationSource( 1, 2, 2, 3 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + nlr.addActivationSource( 3, 2, 4, 0 ); + nlr.addActivationSource( 3, 0, 4, 1 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 1 ); + nlr.addActivationSource( 3, 0, 4, 2 ); + nlr.addActivationSource( 3, 1, 4, 2 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 4, 0, 5, 0 ); + nlr.addActivationSource( 4, 1, 5, 0 ); + nlr.addActivationSource( 4, 2, 5, 0 ); + // Variable indexing nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - // Set the weights and biases for the weighted sum layers + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 18 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + 
tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + } + + void populateNetworkBackwardReluAndBilinear( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + a a' + x e + b b' f h + y d + c c' + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + // Set the weights and biases for the weighted sum layers nlr.setWeight( 0, 0, 1, 0, 1 ); nlr.setWeight( 0, 0, 1, 1, 2 ); nlr.setWeight( 0, 1, 1, 1, -3 ); @@ -2828,700 +3900,520 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setWeight( 2, 1, 3, 0, 1 ); nlr.setWeight( 2, 1, 3, 1, 1 ); nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -5 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 
); + nlr.setWeight( 4, 0, 5, 0, -1 ); nlr.setBias( 1, 0, 1 ); - nlr.setBias( 1, 1, 0 ); - nlr.setBias( 1, 2, 0 ); - - nlr.setBias( 3, 0, 0 ); nlr.setBias( 3, 1, 2 ); - nlr.setBias( 5, 0, 0 ); - nlr.setBias( 5, 1, 0 ); - - // Mark the ReLU/Abs sources + // Mark the ReLU/Bilinear sources nlr.addActivationSource( 1, 0, 2, 0 ); nlr.addActivationSource( 1, 1, 2, 1 ); nlr.addActivationSource( 1, 2, 2, 2 ); nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 1, 4, 0 ); - // Start the testing - Query ipq; - nlr.generateQuery( ipq ); - List unhandledEquations; - Set varsInUnhandledConstraints; - TS_ASSERT( - ipq.constructNetworkLevelReasoner( unhandledEquations, varsInUnhandledConstraints ) ); - NLR::NetworkLevelReasoner *reconstructedNlr = ipq.getNetworkLevelReasoner(); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - double input[2]; - double output[2]; + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - input[0] = 1; - input[1] = 1; + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - input[0] = 1; - input[1] = 2; + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); + // Very loose bounds for neurons except inputs + double large = 1000000; - TS_ASSERT( FloatUtils::areEqual( 
output[0], 4 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); } - - void test_simulate_relu() + + void populateNetworkBackwardReluAndBilinear2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { - NLR::NetworkLevelReasoner nlr; + /* + x3 x7 + x0 + x4 x8 x11 x13 + x1 x15 + x5 x9 x12 x14 + x2 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = ReLU( x3 ) + x8 = ReLU( x4 ) + x9 = ReLU( x5 ) + x10 = ReLU( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + + x13 = ReLU( x11 ) + x14 = ReLU( x12 ) + + x15 = x13 * x14 + */ - populateNetwork( nlr ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::RELU, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::BILINEAR, 1 ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); - // With ReLUs, Inputs are zeros, only biases count - Vector> simulations1; - 
simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } + nlr.setBias( 3, 0, 2 ); + + // Mark the ReLU/Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + nlr.addActivationSource( 4, 0, 5, 0 ); + nlr.addActivationSource( 4, 1, 5, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 13 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 14 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 15 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 16 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + } + + void test_evaluate_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetwork( nlr ); + + double input[2]; + double output[2]; + + // With ReLUs, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); // With ReLUs, case 1 - Vector> 
simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 1 ) ); + input[0] = 1; + input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 1 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 1 ) ); - // With ReLUs, case 1 and 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, 1 ) ); - simulations3.append( Vector( simulationSize, 2 ) ); + // With ReLUs, case 2 + input[0] = 1; + input[1] = 2; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 0 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0 ) ); } - void test_simulate_sigmoids() + void test_evaluate_sigmoids() { NLR::NetworkLevelReasoner nlr; populateNetworkWithSigmoids( nlr ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + double input[2]; + double output[2]; // case 1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); + input[0] = 0; + input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( 
&simulations1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.6750, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 3.0167, - 0.0001 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 0.6750, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 3.0167, 0.0001 ) ); // case 2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 1 ) ); + input[0] = 1; + input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.6032, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.5790, - 0.0001 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 0.6032, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.5790, 0.0001 ) ); // case 3 - Vector> simulations3; - simulations3.append( Vector( simulationSize, 1 ) ); - simulations3.append( Vector( simulationSize, 2 ) ); + input[0] = 1; + input[1] = 2; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.5045, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) 
- .get( 1 ) - .get( i ), - 2.1957, - 0.0001 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 0.5045, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.1957, 0.0001 ) ); } - void test_simulate_round() + void test_evaluate_abs() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithRound( nlr ); + populateNetworkWithAbs( nlr ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + double input[2]; + double output[2]; - // With Round, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); + // With Abs, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - // With Round, case 1 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 2.1 ) ); - simulations2.append( Vector( simulationSize, 1.4 ) ); + // With Abs, case 1 + input[0] = -2; + input[1] = -2; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 2, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -4, - 0.0001 
) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - // With Round, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, 2.1 ) ); - simulations3.append( Vector( simulationSize, 1.6 ) ); + // With Abs, case 2 + input[0] = 1; + input[1] = 2; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -12, - 0.0001 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 10 ) ); } - void test_simulate_sign() + void test_evaluate_sign() { NLR::NetworkLevelReasoner nlr; populateNetworkWithSign( nlr ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + double input[2]; + double output[2]; // With Sign, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); + input[0] = 0; + input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); // With Sign, case 1 - Vector> simulations2; 
- simulations2.append( Vector( simulationSize, -2 ) ); - simulations2.append( Vector( simulationSize, -2 ) ); + input[0] = -2; + input[1] = -2; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - // With Sign, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, -1 ) ); - simulations3.append( Vector( simulationSize, 1 ) ); + // With Sign, case 2 (0 considered "non-negative", sign(0)=1) + input[0] = -1; + input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -4 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], -1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4 ) ); } - void test_simulate_abs() + void test_evaluate_round() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithAbs( nlr ); + populateNetworkWithRound( nlr ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + double input[2]; + double output[2]; - // With Abs, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( 
simulationSize, 0 ) ); + // With Round, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - // With Abs, case 1 - Vector> simulations2; - simulations2.append( Vector( simulationSize, -2 ) ); - simulations2.append( Vector( simulationSize, -2 ) ); + // With Round, case 1 + input[0] = 2.1; + input[1] = 1.4; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 2, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); - // With Abs, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, 1 ) ); - simulations3.append( Vector( simulationSize, 2 ) ); + // With Round, case 2 + input[0] = 2.1; + input[1] = 1.6; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( 
i ), - 4 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 10 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 0, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -12, 0.0001 ) ); } - void test_simulate_leaky_relu() + void test_evaluate_leaky_relu() { NLR::NetworkLevelReasoner nlr; populateNetworkWithLeakyRelu( nlr ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + double input[2]; + double output[2]; // With Leaky ReLU, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); + input[0] = 0; + input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); // With Leaky ReLU, case 1 (alpha=0.1) - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 1 ) ); + input[0] = 1; + input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.9, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - 
.get( i ), - 0.57, - 0.0001 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 0.9, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0.57, 0.0001 ) ); // With Leaky ReLU, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, 1 ) ); - simulations3.append( Vector( simulationSize, 2 ) ); + input[0] = 1; + input[1] = 2; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -0.04, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -0.76, - 0.0001 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], -0.04, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -0.76, 0.0001 ) ); } - void test_simulate_max() + void test_evaluate_max() { NLR::NetworkLevelReasoner nlr; populateNetworkWithMax( nlr ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + double input[2]; + double output[1]; // With Max, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); + input[0] = 0; + input[1] = 0; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -3 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], -3 ) ); // With Max, case 1 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, -3 ) ); + input[0] = 1; + input[1] = -3; - 
TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -18 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], -18 ) ); // With Max, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, -3 ) ); - simulations3.append( Vector( simulationSize, 3 ) ); + input[0] = -3; + input[1] = 3; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -5 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], -5 ) ); } - void test_simulate_softmax() + + void test_evaluate_softmax() { NLR::NetworkLevelReasoner nlr; populateNetworkWithSoftmax( nlr ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + double input[2]; + double output[2]; // With Softmax, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + input[0] = 0; + input[1] = 0; - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.2999, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.4001, - 0.0001 ) ); - } + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.2999, 0.0001 ) ); + TS_ASSERT( 
FloatUtils::areEqual( output[1], 2.4001, 0.0001 ) ); // With Softmax, case 1 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, -3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + input[0] = 1; + input[1] = -3; - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.1192, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.7615, - 0.0001 ) ); - } + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.7615, 0.0001 ) ); // With Softmax, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, -3 ) ); - simulations3.append( Vector( simulationSize, 3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.1206, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.7588, - 0.0001 ) ); - } + input[0] = -3; + input[1] = 3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.1206, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.7588, 0.0001 ) ); } - void test_simulate_bilinear() + void test_evaluate_bilinear() { NLR::NetworkLevelReasoner nlr; populateNetworkWithBilinear( nlr ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + double input[2]; + double output[1]; // With Bilinear, Inputs are zeros, only 
biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + input[0] = 0; + input[1] = 0; - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); - } + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); // With Bilinear, case 1 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 0.1 ) ); - simulations2.append( Vector( simulationSize, -0.3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + input[0] = 0.1; + input[1] = -0.3; - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 2.8304, - 0.0001 ) ); - } + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 2.8304, 0.0001 ) ); // With Bilinear, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, -0.3 ) ); - simulations3.append( Vector( simulationSize, 0.3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + input[0] = -0.3; + input[1] = 0.3; - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.0912, - 0.0001 ) ); - } + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.0912, 0.0001 ) ); } - - void test_simulate_non_consecutive_layers() + + void test_evaluate_non_consecutive_layers() { NLR::NetworkLevelReasoner nlr; @@ -3565,889 +4457,3056 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 
nlr.setWeight( 4, 1, 5, 0, 1 ); nlr.setWeight( 4, 2, 5, 0, 1 ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + // Evaluate + double input[2]; + double output; - // Simulate1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1 ) ); - simulations1.append( Vector( simulationSize, 1 ) ); + input[0] = 1; + input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); + TS_ASSERT( FloatUtils::areEqual( output, 2 ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 2 ) ); + input[0] = -1; + input[1] = 2; - // Simulate2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, -1 ) ); - simulations2.append( Vector( simulationSize, 2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); + TS_ASSERT( FloatUtils::areEqual( output, 0 ) ); - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.computeSuccessorLayers() ); - for ( unsigned i = 0; i < simulationSize; ++i ) - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 0 )->getSuccessorLayers(), Set( { 1, 3, 4 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 1 )->getSuccessorLayers(), Set( { 2 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 2 )->getSuccessorLayers(), Set( { 3 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 3 )->getSuccessorLayers(), Set( { 4 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getSuccessorLayers(), Set( { 5 } ) ); } - void test_simulate_abs_and_relu() + void test_evaluate_abs_and_relu() { NLR::NetworkLevelReasoner nlr; - + populateNetworkWithAbsAndRelu( nlr ); - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + double input[2]; + 
double output[2]; - // Simulate1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1 ) ); - simulations1.append( Vector( simulationSize, 1 ) ); + input[0] = 1; + input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 2 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); - // Simulate2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 2 ) ); + input[0] = 1; + input[1] = 2; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 4 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); } - void test_simulate_round_and_sign() + void test_evaluate_round_and_sign() { NLR::NetworkLevelReasoner nlr; - + populateNetworkWithRoundAndSign( nlr ); + + double input[2]; + double output[2]; - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Round/Sign, case 1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1.6 ) ); - simulations1.append( Vector( simulationSize, 1.4 ) ); + input[0] = 1.6; + input[1] = 
1.4; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -2 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 1, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -2, 0.0001 ) ); - // With Round/Sign, case 2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1.6 ) ); - simulations2.append( Vector( simulationSize, 1.6 ) ); + input[0] = 1.6; + input[1] = 1.6; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -4 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], -1, 0.0001) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); } - void test_simulate_leaky_relu_and_sigmoid() + void test_evaluate_leaky_relu_and_sigmoid() { NLR::NetworkLevelReasoner nlr; - + populateNetworkWithLeakyReluAndSigmoid( nlr ); + + double input[2]; + double output[2]; - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With LeakyReLU/Sigmoid, case 1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1 ) ); - simulations1.append( Vector( simulationSize, 1 ) ); + input[0] = 1; + input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, 
output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.7109, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 1.4602, - 0.0001 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 0.7109, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 1.4602, 0.0001 ) ); - // With LeakyReLU/Sigmoid, case 2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 2 ) ); + input[0] = 1; + input[1] = 2; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.4013, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 0.6508, - 0.0001 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 0.4013, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0.6508, 0.0001 ) ); } - void test_simulate_softmax_and_max() + void test_evaluate_softmax_and_max() { NLR::NetworkLevelReasoner nlr; - + populateNetworkWithSoftmaxAndMax( nlr ); + + double input[2]; + double output[1]; - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With LeakyReLU/Sigmoid, case 1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1 ) ); - simulations1.append( Vector( simulationSize, -3 ) ); + input[0] = 1; + input[1] = -3; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; 
++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -2.9998, - 0.0001 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], -2.9998, 0.0001 ) ); - // With LeakyReLU/Sigmoid, case 2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, -3 ) ); - simulations2.append( Vector( simulationSize, 3 ) ); + input[0] = -3; + input[1] = 3; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -1, - 0.0001 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], -1.0000, 0.0001 ) ); } - void test_simulate_relu_and_bilinear() + void test_evaluate_relu_and_bilinear() { NLR::NetworkLevelReasoner nlr; - + populateNetworkWithReluAndBilinear( nlr ); + + double input[2]; + double output[1]; - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Relu/Bilinear, case 1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1 ) ); - simulations1.append( Vector( simulationSize, 1 ) ); + input[0] = 1; + input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - // With ReLU/Bilinear, case 2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 2 ) ); + input[0] = 1; + input[1] = 2; - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + TS_ASSERT_THROWS_NOTHING( 
nlr.evaluate( input, output ) ); - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); - } + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); } - - void test_interval_arithmetic_bound_propagation_relu_constraints() + + void test_store_into_other() { NLR::NetworkLevelReasoner nlr; + populateNetwork( nlr ); - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + NLR::NetworkLevelReasoner nlr2; - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + double input[2]; + double output1[2]; + double output2[2]; - nlr.setTableau( &tableau ); + // Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) 
); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + // With ReLUs, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + // With ReLUs, case 1 + input[0] = 1; + input[1] = 1; - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), - } ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + TS_ASSERT( FloatUtils::areEqual( output1[0], 
output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + void test_store_into_other_with_sigmoids() + { + NLR::NetworkLevelReasoner nlr; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + populateNetworkWithSigmoids( nlr ); - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + NLR::NetworkLevelReasoner nlr2; - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + double input[2]; + double output1[2]; + double output2[2]; - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + // case 1 + input[0] = 0; + input[1] = 0; - Tightening( 6, -1, 
Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + // case 2 + input[0] = 1; + input[1] = 1; - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), - } ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - - void test_interval_arithmetic_bound_propagation_abs_constraints() + + void test_store_into_other_with_round() { NLR::NetworkLevelReasoner nlr; - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + populateNetworkWithRound( nlr ); - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); + NLR::NetworkLevelReasoner nlr2; - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - 
nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); + double input[2]; + double output1[2]; + double output2[2]; - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); + // case 1 + input[0] = 0; + input[1] = 0; - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); + // case 2 + input[0] = 1; + input[1] = 1; - // Layer dependenices - nlr.addLayerDependency( 0, 1 ); - nlr.addLayerDependency( 1, 2 ); - nlr.addLayerDependency( 2, 3 ); - nlr.addLayerDependency( 3, 4 ); - nlr.addLayerDependency( 4, 5 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_sign() + { + NLR::NetworkLevelReasoner nlr; - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + 
populateNetworkWithSign( nlr ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + NLR::NetworkLevelReasoner nlr2; - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + double input[2]; + double output1[2]; + double output2[2]; - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + // case 1 + input[0] = 0; + input[1] = 0; - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - 
tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + // case 2 + input[0] = 1; + input[1] = 1; - nlr.setTableau( &tableau ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_abs() + { + NLR::NetworkLevelReasoner nlr; - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + populateNetworkWithAbs( nlr ); - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + NLR::NetworkLevelReasoner nlr2; - Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 8, Tightening::UB ), + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + double input[2]; + double output1[2]; + double output2[2]; - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), + // case 1 + input[0] = 0; + input[1] = 0; - Tightening( 10, -3, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 
13, 41, Tightening::UB ), - } ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + // case 2 + input[0] = 1; + input[1] = 1; - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + populateNetworkWithLeakyRelu( nlr ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + 
NLR::NetworkLevelReasoner nlr2; - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), + double input[2]; + double output1[2]; + double output2[2]; - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + // case 1 + input[0] = 0; + input[1] = 0; - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), - } ); + // case 2 + input[0] = 1; + input[1] = 1; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - void test_interval_arithmetic_bound_propagation_sign_constraints() + void test_store_into_other_with_max() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithSign( nlr ); - 
MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + populateNetworkWithMax( nlr ); - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); + NLR::NetworkLevelReasoner nlr2; - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - nlr.setTableau( &tableau ); + double input[2]; + double output1[2]; + double output2[2]; - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + // case 1 + input[0] = 0; + input[1] = 0; - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( 
output1[1], output2[1] ) ); - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + // case 2 + input[0] = 1; + input[1] = 1; - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_softmax() + { + NLR::NetworkLevelReasoner nlr; - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + populateNetworkWithSoftmax( nlr ); - Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), - } ); + NLR::NetworkLevelReasoner nlr2; - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - // Change the current bounds - tableau.setLowerBound( 0, 3 ); - tableau.setUpperBound( 0, 4 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + double input[2]; + double output1[2]; + double output2[2]; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, 
-large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + // case 1 + input[0] = 0; + input[1] = 0; - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - List expectedBounds2( { - Tightening( 2, 4, Tightening::LB ), Tightening( 2, 5, Tightening::UB ), - Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), + // case 2 + input[0] = 1; + input[1] = 1; - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 11, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_bilinear() + { + NLR::NetworkLevelReasoner nlr; - Tightening( 8, 1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 
1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + populateNetworkWithBilinear( nlr ); - Tightening( 10, 1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), - Tightening( 11, 1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + NLR::NetworkLevelReasoner nlr2; - Tightening( 12, 1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, 4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), - } ); + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - void test_interval_arithmetic_bound_propagation_leaky_relu_constraints() + void test_generate_input_query() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithLeakyRelu( nlr ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, 
NLR::Layer::WEIGHTED_SUM, 2 ); - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); - nlr.setTableau( &tableau ); + // Variable indexing - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), - Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), - Tightening( 7, 
-0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), - Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), - - Tightening( 9, -0.28, Tightening::LB ), Tightening( 9, 10.1, Tightening::UB ), - Tightening( 11, -0.38, Tightening::LB ), Tightening( 11, 9.1, Tightening::UB ), + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - Tightening( 12, -0.28, Tightening::LB ), Tightening( 12, 10.1, Tightening::UB ), - Tightening( 13, -1.42, Tightening::LB ), Tightening( 13, 37.4, Tightening::UB ), - } ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + // Set the weights and biases for the weighted sum layers - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - 
tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -5 ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 1, 1, 0 ); + nlr.setBias( 1, 2, 0 ); - Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + nlr.setBias( 3, 0, 0 ); + nlr.setBias( 3, 1, 2 ); - Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), - Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), - - Tightening( 9, -0.34, Tightening::LB ), Tightening( 9, 7.1, Tightening::UB ), - Tightening( 11, -0.32, Tightening::LB ), Tightening( 11, 7.3, Tightening::UB ), + nlr.setBias( 5, 0, 0 ); + nlr.setBias( 5, 1, 0 ); - Tightening( 12, -0.34, Tightening::LB ), 
Tightening( 12, 7.1, Tightening::UB ), - Tightening( 13, -1.3, Tightening::LB ), Tightening( 13, 29, Tightening::UB ), - } ); + // Mark the ReLU/Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_round_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithRound( nlr ); - - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); - // Initialize the bounds - tableau.setLowerBound( 0, 1.4 ); - tableau.setUpperBound( 0, 1.6 ); - tableau.setLowerBound( 1, -1.4 ); - tableau.setUpperBound( 1, 2.1 ); + // Start the testing + Query ipq; + nlr.generateQuery( ipq ); + List unhandledEquations; + Set varsInUnhandledConstraints; + TS_ASSERT( + ipq.constructNetworkLevelReasoner( unhandledEquations, varsInUnhandledConstraints ) ); + NLR::NetworkLevelReasoner *reconstructedNlr = ipq.getNetworkLevelReasoner(); + + double input[2]; + double output[2]; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); + + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + } + + void test_simulate_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetwork( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With ReLUs, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + 
simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With ReLUs, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 1 ) ); + } + + // With ReLUs, case 1 and 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 0 ) ); + } + } + + void test_simulate_sigmoids() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSigmoids( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // case 1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( 
&simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.6750, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 3.0167, + 0.0001 ) ); + } + + // case 2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.6032, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.5790, + 0.0001 ) ); + } + + // case 3 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.5045, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.1957, + 0.0001 ) ); + } + } + + void test_simulate_round() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithRound( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Round, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( 
&simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Round, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 2.1 ) ); + simulations2.append( Vector( simulationSize, 1.4 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -4, + 0.0001 ) ); + } + + // With Round, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 2.1 ) ); + simulations3.append( Vector( simulationSize, 1.6 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -12, + 0.0001 ) ); + } + } + + void test_simulate_sign() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSign( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Sign, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( 
unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Sign, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, -2 ) ); + simulations2.append( Vector( simulationSize, -2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Sign, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, -1 ) ); + simulations3.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -4 ) ); + } + } + + void test_simulate_abs() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbs( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Abs, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( 
+ ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Abs, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, -2 ) ); + simulations2.append( Vector( simulationSize, -2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Abs, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 4 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 10 ) ); + } + } + + void test_simulate_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithLeakyRelu( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Leaky ReLU, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 
)->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Leaky ReLU, case 1 (alpha=0.1) + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.9, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 0.57, + 0.0001 ) ); + } + + // With Leaky ReLU, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -0.04, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -0.76, + 0.0001 ) ); + } + } + + void test_simulate_max() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithMax( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Max, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 
)->getSimulations() ) ) + .get( 0 ) + .get( i ), + -3 ) ); + } + + // With Max, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, -3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -18 ) ); + } + + // With Max, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, -3 ) ); + simulations3.append( Vector( simulationSize, 3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -5 ) ); + } + } + + void test_simulate_softmax() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmax( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Softmax, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.2999, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.4001, + 0.0001 ) ); + } + + // With Softmax, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, -3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < 
simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.1192, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.7615, + 0.0001 ) ); + } + + // With Softmax, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, -3 ) ); + simulations3.append( Vector( simulationSize, 3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.1206, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.7588, + 0.0001 ) ); + } + } + + void test_simulate_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithBilinear( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Bilinear, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0 ) ); + } + + // With Bilinear, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 0.1 ) ); + simulations2.append( Vector( simulationSize, -0.3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + 
.get( 0 ) + .get( i ), + 2.8304, + 0.0001 ) ); + } + + // With Bilinear, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, -0.3 ) ); + simulations3.append( Vector( simulationSize, 0.3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.0912, + 0.0001 ) ); + } + } + + void test_simulate_non_consecutive_layers() + { + NLR::NetworkLevelReasoner nlr; + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + nlr.addLayerDependency( 0, 1 ); + nlr.addLayerDependency( 1, 2 ); + nlr.addLayerDependency( 2, 3 ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 3, 4 ); + nlr.addLayerDependency( 0, 4 ); + nlr.addLayerDependency( 4, 5 ); + + // Set the weights and relus + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, 2 ); + nlr.setWeight( 2, 2, 3, 1, -2 ); + nlr.setWeight( 0, 1, 3, 1, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 0, 0, 4, 2 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 2, 5, 0, 1 ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // Simulate1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1 ) 
); + simulations1.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2 ) ); + + // Simulate2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, -1 ) ); + simulations2.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0 ) ); + } + + void test_simulate_abs_and_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbsAndRelu( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // Simulate1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1 ) ); + simulations1.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2 ) ); + } + + // Simulate2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 4 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() 
) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + } + + void test_simulate_round_and_sign() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithRoundAndSign( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Round/Sign, case 1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1.6 ) ); + simulations1.append( Vector( simulationSize, 1.4 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -2 ) ); + } + + // With Round/Sign, case 2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1.6 ) ); + simulations2.append( Vector( simulationSize, 1.6 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -4 ) ); + } + } + + void test_simulate_leaky_relu_and_sigmoid() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithLeakyReluAndSigmoid( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With LeakyReLU/Sigmoid, case 1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1 ) ); + simulations1.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( 
nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.7109, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 1.4602, + 0.0001 ) ); + } + + // With LeakyReLU/Sigmoid, case 2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.4013, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 0.6508, + 0.0001 ) ); + } + } + + void test_simulate_softmax_and_max() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmaxAndMax( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With LeakyReLU/Sigmoid, case 1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1 ) ); + simulations1.append( Vector( simulationSize, -3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -2.9998, + 0.0001 ) ); + } + + // With LeakyReLU/Sigmoid, case 2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, -3 ) ); + simulations2.append( Vector( simulationSize, 3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -1, + 0.0001 ) ); + } + } + + void 
test_simulate_relu_and_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithReluAndBilinear( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Relu/Bilinear, case 1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1 ) ); + simulations1.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + } + + // With ReLU/Bilinear, case 2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0 ) ); + } + } + + void test_interval_arithmetic_bound_propagation_relu_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetwork( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + 
tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); 
+ tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, 
expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_abs_constraints() + { + NLR::NetworkLevelReasoner nlr; + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Layer dependenices + nlr.addLayerDependency( 0, 1 ); + nlr.addLayerDependency( 1, 2 ); + nlr.addLayerDependency( 2, 3 ); + nlr.addLayerDependency( 3, 4 ); + nlr.addLayerDependency( 4, 5 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -8, Tightening::LB ), 
Tightening( 4, 7, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 8, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 41, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_sign_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSign( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + 
tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, 3 ); + tableau.setUpperBound( 0, 4 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 
); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, 4, Tightening::LB ), Tightening( 2, 5, Tightening::UB ), + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 11, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, 1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, 1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, 4, Tightening::LB ), 
Tightening( 13, 4, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_leaky_relu_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithLeakyRelu( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), + Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), 
Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), + Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), + + Tightening( 9, -0.28, Tightening::LB ), Tightening( 9, 10.1, Tightening::UB ), + Tightening( 11, -0.38, Tightening::LB ), Tightening( 11, 9.1, Tightening::UB ), + + Tightening( 12, -0.28, Tightening::LB ), Tightening( 12, 10.1, Tightening::UB ), + Tightening( 13, -1.42, Tightening::LB ), Tightening( 13, 37.4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // 
Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), + Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), + + Tightening( 9, -0.34, Tightening::LB ), Tightening( 9, 7.1, Tightening::UB ), + Tightening( 11, -0.32, Tightening::LB ), Tightening( 11, 7.3, Tightening::UB ), + + Tightening( 12, -0.34, Tightening::LB ), Tightening( 12, 7.1, Tightening::UB ), + Tightening( 13, -1.3, Tightening::LB ), Tightening( 13, 29, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_round_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithRound( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, 1.4 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, -1.4 ); + tableau.setUpperBound( 1, 2.1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large 
); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), + Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), + Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), + + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), + Tightening( 11, -7, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), + Tightening( 13, -25, Tightening::LB ), Tightening( 13, 35, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3.1 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, 1.4 ); + 
tableau.setUpperBound( 1, 2.1 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), + Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), + Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + + Tightening( 9, -16, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 11, -15, Tightening::LB ), Tightening( 11, 2, Tightening::UB ), + + Tightening( 12, -16, Tightening::LB ), Tightening( 12, 1, 
Tightening::UB ), + Tightening( 13, -61, Tightening::LB ), Tightening( 13, 7, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_sigmoid_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSigmoids( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, 
Tightening::UB ), + + Tightening( 3, 0.5000, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), + Tightening( 5, 0.0067, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), + Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.7311, Tightening::UB ), + + Tightening( 8, -0.2244, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), + Tightening( 10, 0.3948, Tightening::LB ), Tightening( 10, 2.2244, Tightening::UB ), + + Tightening( 9, 0.4441, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), + Tightening( 11, 0.5974, Tightening::LB ), Tightening( 11, 0.9024, Tightening::UB ), + + Tightening( 12, 0.4441, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ), + Tightening( 13, 2.2364, Tightening::LB ), Tightening( 13, 3.5399, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + 
tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.1192, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), + Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), + + Tightening( 8, -0.7616, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), + Tightening( 10, 0.2384, Tightening::LB ), Tightening( 10, 2.6052, Tightening::UB ), + + Tightening( 9, 0.3183, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), + Tightening( 11, 0.5593, Tightening::LB ), Tightening( 11, 0.9312, Tightening::UB ), + + Tightening( 12, 0.3183, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ), + Tightening( 13, 1.9963, Tightening::LB ), Tightening( 13, 3.6263, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_max_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithMax( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, 
large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 10, Tightening::UB ), + Tightening( 9, -8, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), + + Tightening( 11, -10, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 
3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -8, Tightening::LB ), Tightening( 3, 9, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 9, Tightening::UB ), + Tightening( 7, -5, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 16, Tightening::UB ), + Tightening( 9, -14, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -5, Tightening::LB ), Tightening( 10, 16, Tightening::UB ), + + Tightening( 11, -16, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_softmax_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSoftmax( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + 
tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), + Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), + + Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), + Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), + + Tightening( 9, 0.0240, Tightening::LB 
), Tightening( 9, 0.8349, Tightening::UB ), + Tightening( 11, 0.1651, Tightening::LB ), Tightening( 11, 0.9759, Tightening::UB ), + + Tightening( 12, 0.0240, Tightening::LB ), Tightening( 12, 0.8349, Tightening::UB ), + Tightening( 13, 0.5192, Tightening::LB ), Tightening( 13, 3.7629, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, 
Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ), + + Tightening( 9, 0.0184, Tightening::LB ), Tightening( 9, 0.8678, Tightening::UB ), + Tightening( 11, 0.1322, Tightening::LB ), Tightening( 11, 0.9816, Tightening::UB ), + + Tightening( 12, 0.0184, Tightening::LB ), Tightening( 12, 0.8678, Tightening::UB ), + Tightening( 13, 0.4151, Tightening::LB ), Tightening( 13, 3.8125, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_bilinear_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithBilinear( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -0.1 ); + tableau.setUpperBound( 0, 0.1 ); + tableau.setLowerBound( 1, -0.1 ); + tableau.setUpperBound( 1, 0.1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 
11, -large ); + tableau.setUpperBound( 11, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0.9, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ), + Tightening( 3, -0.5, Tightening::LB ), Tightening( 3, 0.5, Tightening::UB ), + Tightening( 4, -0.3, Tightening::LB ), Tightening( 4, 0.3, Tightening::UB ), + Tightening( 5, -0.3, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), + + Tightening( 6, -0.55, Tightening::LB ), Tightening( 6, 0.55, Tightening::UB ), + Tightening( 7, -0.09, Tightening::LB ), Tightening( 7, 0.09, Tightening::UB ), + + Tightening( 8, 1.36, Tightening::LB ), Tightening( 8, 2.64, Tightening::UB ), + Tightening( 9, -0.64, Tightening::LB ), Tightening( 9, 0.64, Tightening::UB ), + + Tightening( 10, -1.6896, Tightening::LB ), Tightening( 10, 1.6896, Tightening::UB ), + + Tightening( 11, -1.6896, Tightening::LB ), Tightening( 11, 1.6896, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -0.3 ); + tableau.setUpperBound( 0, 0.1 ); + tableau.setLowerBound( 1, -0.1 ); + tableau.setUpperBound( 1, 0.2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large 
); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, 0.7, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ), + Tightening( 3, -0.8, Tightening::LB ), Tightening( 3, 0.9, Tightening::UB ), + Tightening( 4, -0.5, Tightening::LB ), Tightening( 4, 0.5, Tightening::UB ), + Tightening( 5, -0.6, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), + + Tightening( 6, -0.88, Tightening::LB ), Tightening( 6, 0.99, Tightening::UB ), + Tightening( 7, -0.3, Tightening::LB ), Tightening( 7, 0.3, Tightening::UB ), + + Tightening( 8, 0.82, Tightening::LB ), Tightening( 8, 3.29, Tightening::UB ), + Tightening( 9, -1.29, Tightening::LB ), Tightening( 9, 1.18, Tightening::UB ), + + Tightening( 10, -4.2441, Tightening::LB ), Tightening( 10, 3.8822, Tightening::UB ), + + Tightening( 11, -3.8822, Tightening::LB ), Tightening( 11, 4.2441, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_abs_and_relu_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithAbsAndRelu( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); 
+ tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -5, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change 
the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), + + Tightening( 10, -10, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), 
Tightening( 11, 14, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_round_and_sign_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithRoundAndSign( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, 1.4 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, -1.4 ); + tableau.setUpperBound( 1, 2.1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 
4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), + Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), + Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), + + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3.1 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, 1.4 ); + tableau.setUpperBound( 1, 2.1 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + 
tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), + Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), + Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_leaky_relu_and_sigmoid_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithLeakyReluAndSigmoid( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); double large = 1000; tableau.setLowerBound( 2, -large ); @@ -4475,366 +7534,922 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 13, -large ); tableau.setUpperBound( 13, large ); - nlr.setTableau( 
&tableau ); + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), + Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), + Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), + + Tightening( 9, 0.0573, Tightening::LB ), Tightening( 9, 0.9999, Tightening::UB ), + Tightening( 11, 0.0219, Tightening::LB ), Tightening( 11, 0.9999, Tightening::UB ), + + Tightening( 12, 0.0573, Tightening::LB ), Tightening( 12, 0.9999, Tightening::UB ), + Tightening( 13, 0.1229, Tightening::LB ), Tightening( 13, 3.9996, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + 
tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), + Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), + + Tightening( 9, 0.0323, Tightening::LB ), Tightening( 9, 0.9992, Tightening::UB ), + Tightening( 11, 0.0392, Tightening::LB ), Tightening( 11, 0.9993, Tightening::UB ), + + Tightening( 12, 0.0323, Tightening::LB ), Tightening( 12, 0.9992, Tightening::UB ), + Tightening( 13, 0.1498, Tightening::LB ), Tightening( 13, 3.9972, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_softmax_and_max_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSoftmaxAndMax( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 
12 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), + Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), + + Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), + Tightening( 9, 0.3192, Tightening::LB ), Tightening( 9, 2.9819, Tightening::UB ), + + Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), + + Tightening( 11, -2.9819, Tightening::LB ), Tightening( 11, -0.3192, 
Tightening::UB ) + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 9, 0.0654, Tightening::LB ), Tightening( 9, 2.9933, Tightening::UB ), + + Tightening( 10, 0.0654, Tightening::LB ), 
Tightening( 10, 2.9933, Tightening::UB ), + + Tightening( 11, -2.9933, Tightening::LB ), Tightening( 11, -0.0654, Tightening::UB ) + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_relu_and_bilinear_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithReluAndBilinear( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + 
Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -7, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), + + Tightening( 11, -49, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, 
Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -14, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), + + Tightening( 11, -49, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_sbt_relus_all_active() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Both ReLUs active, bound survive through activations: + + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ + + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), 
+ Tightening( 6, 16, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_relus_active_and_inactive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive, bounds get zeroed + Second ReLU is active, bounds surive the activation + + x4.lb = 0 + x4.ub = 0 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_relus_active_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + 
NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is undecided, bound is concretized. + Coefficient: 12/(12--4) = 12/16 = 0.75 + Second ReLU is active, bounds surive the activation + + x4 range: [0, 12] + x4.lb = 0.75( 2x0 + 3x1 ) - 0.75 * 15 = 1.5x0 + 2.25x1 - 11.25 + x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = 0.5x0 + 1.25x1 - 11.25 + x6.ub = 0.5x0 + 1.25x1 - 8.25 + + x6 range: [2 + 1.25 - 11.25 = -8, 3 + 6.25 - 8.25 = 1] = [-8, 1] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_relus_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + 
populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. + nlr.setBias( 1, 0, -15 ); + + // However, one of the ReLU's variables has been eliminated + nlr.eliminateVariable( 2, -3 ); - // Initialize + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds( { - Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), - Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), - Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - - Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + x0: [4, 6] + x1: [1, 5] - Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), - Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), - - Tightening( 9, -4, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), - Tightening( 11, -7, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Layer 1: - Tightening( 12, -4, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), - Tightening( 13, -25, Tightening::LB ), Tightening( 13, 35, Tightening::UB ), - } ); + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] - // Change the current bounds - tableau.setLowerBound( 0, -3.1 ); - 
tableau.setUpperBound( 0, 1.6 ); - tableau.setLowerBound( 1, 1.4 ); - tableau.setUpperBound( 1, 2.1 ); + First ReLU is inactive (set externally), bounds get zeroed + Second ReLU is active, bounds surive the activation - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + x4.lb = 0 + x4.ub = 0 - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + Layer 2: - List expectedBounds2( { - Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), - Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), - Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), - Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ - Tightening( 8, -16, Tightening::LB ), 
Tightening( 8, 1, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - - Tightening( 9, -16, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 11, -15, Tightening::LB ), Tightening( 11, 2, Tightening::UB ), + List expectedBounds( { + // x2 does not appear, because it has been eliminated - Tightening( 12, -16, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -61, Tightening::LB ), Tightening( 13, 7, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - - void test_interval_arithmetic_bound_propagation_sigmoid_constraints() + + void test_sbt_relu_residual1() { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithSigmoids( nlr ); - + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + nlr.setTableau( &tableau ); + populateNetworkSBTReluResidual1( nlr, tableau ); - // Initialize the bounds tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - 
tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - nlr.setTableau( &tableau ); + /* + Input ranges: - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + x0: [-1, 1] - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + Layer 1: - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + x1.lb = x0 : [-1, 1] + x1.ub = x0 : [-1, 1] + + ReLU is undecided, bound is concretized. 
+ Coefficient: 1/( 1--1 ) = 1/2 = 0.5 - Tightening( 3, 0.5000, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), - Tightening( 5, 0.0067, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), - Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.7311, Tightening::UB ), + x2.lb = 0.5x0 + x2.ub = 0.5x0 + 0.5 + x2 range: [0, 1] + + Layer 2 (with residual from x0): - Tightening( 8, -0.2244, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), - Tightening( 10, 0.3948, Tightening::LB ), Tightening( 10, 2.2244, Tightening::UB ), + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1] + x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] + x3 range: [-1, 2.5] + + ReLU is undecided, bound is concretized. + Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. - Tightening( 9, 0.4441, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), - Tightening( 11, 0.5974, Tightening::LB ), Tightening( 11, 0.9024, Tightening::UB ), + x4.lb = 0 + x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] + x4 range: [0, 2.5] - Tightening( 12, 0.4441, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ), - Tightening( 13, 2.2364, Tightening::LB ), Tightening( 13, 3.5399, Tightening::UB ), + Layer 3 (with residual from x1): + + x5.lb = 3 ( 0 ) + 3 ( x0 ) + 1 = 3x0 + 1 : [-2, 4] + x5.ub = 3 ( -15/14 x0 + 20/14 ) + 3 ( x0 ) + 1 = -3/14 x0 + 74/14 : [71/14, 77/14 = 5.5] + + x5 range: [-2, 4] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2.5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 2.5, Tightening::UB ), + Tightening( 5, 2, Tightening::LB ), + Tightening( 5, 5.5, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) 
); + } + + void test_sbt_relu_residual2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluResidual2( nlr, tableau ); - // Change the current bounds - tableau.setLowerBound( 0, -3 ); + tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + x0: [-1, 1] - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + Layer 1: - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + x1.lb = x0 : 
[-1, 1] + x1.ub = x0 : [-1, 1] + + ReLU is undecided, bound is concretized. + Coefficient: 1/( 1--1 ) = 1/2 = 0.5 - Tightening( 3, 0.1192, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), - Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), + x2.lb = 0.5x0 + x2.ub = 0.5x0 + 0.5 + x2 range: [0, 1] + + Layer 2 (with residual from x0): - Tightening( 8, -0.7616, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), - Tightening( 10, 0.2384, Tightening::LB ), Tightening( 10, 2.6052, Tightening::UB ), + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1] + x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] + x3 range: [-1, 2.5] + + ReLU is undecided, bound is concretized. + Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. - Tightening( 9, 0.3183, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), - Tightening( 11, 0.5593, Tightening::LB ), Tightening( 11, 0.9312, Tightening::UB ), + x4.lb = 0 + x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] + x4 range: [0, 2.5] - Tightening( 12, 0.3183, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ), - Tightening( 13, 1.9963, Tightening::LB ), Tightening( 13, 3.6263, Tightening::UB ), + Layer 3 (with residual from x0): + + x5.lb = 3 ( 0 ) + 1 ( x0 ) + 1 = 1x0 + 1 : [0, 2] + x5.ub = 3 ( -15/14 x0 + 20/14 ) + 1 ( x0 ) + 1 = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5] + x5 range: [0, 7.5] + + Layer 4: + x6.lb = 1x0 + 1 : [0, 2] + x6.ub = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5] + x6 range: [0, 7.5] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2.5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 2.5, Tightening::UB ), + Tightening( 5, 0, 
Tightening::LB ), + Tightening( 5, 7.5, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), + Tightening( 6, 7.5, Tightening::UB ), } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - void test_interval_arithmetic_bound_propagation_max_constraints() + void test_sbt_relu_reindex() { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithMax( nlr ); - + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 12 ); + nlr.setTableau( &tableau ); + populateNetworkSBTReluReindex( nlr, tableau ); - // Initialize the bounds tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, -1 ); tableau.setUpperBound( 1, 1 ); - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - nlr.setTableau( &tableau ); + /* + Input ranges: - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + x0: [-1, 1] + x1: [-1, 1] - // Perform the 
tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + Layer 1: - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 10, Tightening::UB ), - Tightening( 9, -8, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Both ReLUs are undecided, bounds are concretized. + Coefficient: 2/( 2--2 ) = 2/4 = 0.5 - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), + x4.lb = 0.5 ( x0 + x1 ) = 0.5x0 + 0.5x1 + x4.ub = 0.5 ( x0 + x1 ) + 1 = 0.5x0 + 0.5x1 + 1 + x4 range: [0, 2] + + x5.lb = 0.5 ( x0 - x1 ) = 0.5x0 - 0.5x1 + x5.ub = 0.5 ( x0 - x1 ) + 1 = 0.5x0 - 0.5x1 + 1 + x5 range: [0, 2] - Tightening( 11, -10, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), - } ); + Layer 2: + + x6.lb = 1 ( 0.5x0 + 0.5x1 ) + 1 ( 0.5x0 - 0.5x1 ) = x0 : [-1, 1] + x6.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) + 1 ( 0.5x0 - 0.5x1 + 1 ) = x0 + 2 : [1, 3] + x6 range: [-1, 3] + + x7.lb = 1 ( 0.5x0 + 0.5x1 ) - 1 ( 0.5x0 - 0.5x1 + 1 ) = x1 - 1 : [-2, 0] + x7.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) - 1 ( 0.5x0 - 0.5x1 ) = x1 + 1 : [0, 2] + x7 range: [-2, 2] + + Both ReLUs are undecided, bounds are concretized. 
+ Coefficient (first ReLU, lower): 1/( 1--1 ) = 1/2 = 0.5 + Coefficient (first ReLU, upper): 1 (propagated as is) + Coefficient (second ReLU, lower): 0 (bound is zero) + Coefficient (second ReLU, upper): 2/( 2--2 ) = 2/4 = 0.5 + + x8.lb = 0.5 ( x0 ) = 0.5x0 + x8.ub = x0 + 2 + x8 range: [0, 3] + + x9.lb = 0 + x9.ub = 0.5 ( x1 + 1 ) + 1 = 0.5x1 + 1.5 + x9 range: [0, 2] + + Layer 3: + + x10.lb = 1 ( 0.5x0 ) + 1 ( 0 ) + 1 = 0.5x0 + 1 : [0.5, 1.5] + x10.ub = 1 ( x0 + 2 ) + 1 ( 0.5x1 + 1.5 ) + 1 = x0 + 0.5x1 + 4.5 : [3, 6] + x10 range: [0.5, 6] + + x11.lb = 0.5x1 - 0.5 + x11.ub = 0.5x1 + 1.5 + x11 range: [0, 2] + + */ + + List expectedBounds( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, 0.5, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 2, Tightening::UB ) + + } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_abs_all_positive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); - tableau.setLowerBound( 2, 
-large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -8, Tightening::LB ), Tightening( 3, 9, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 9, Tightening::UB ), - Tightening( 7, -5, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + 
nlr.addActivationSource( 1, 1, 2, 1 ); - Tightening( 8, -5, Tightening::LB ), Tightening( 8, 16, Tightening::UB ), - Tightening( 9, -14, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -5, Tightening::LB ), Tightening( 10, 16, Tightening::UB ), - - Tightening( 11, -16, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), - } ); + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_softmax_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithSoftmax( nlr ); - - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; - double large = 1000; tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -4845,184 +8460,109 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - 
tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - nlr.setTableau( &tableau ); + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); - // Initialize + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), - Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), + x0: [4, 6] + x1: [1, 5] - Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), - Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), - - Tightening( 9, 0.0240, Tightening::LB ), Tightening( 9, 0.8349, Tightening::UB ), - Tightening( 11, 0.1651, Tightening::LB ), Tightening( 11, 0.9759, Tightening::UB ), + Layer 1: - Tightening( 12, 0.0240, Tightening::LB ), Tightening( 12, 0.8349, Tightening::UB ), - Tightening( 13, 0.5192, Tightening::LB ), Tightening( 13, 3.7629, Tightening::UB ), - } ); + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] - // Change 
the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + Both absolute values positive, bound survive through activations: - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + Layer 2: - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ 
- Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), - Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ), - - Tightening( 9, 0.0184, Tightening::LB ), Tightening( 9, 0.8678, Tightening::UB ), - Tightening( 11, 0.1322, Tightening::LB ), Tightening( 11, 0.9816, Tightening::UB ), + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), - Tightening( 12, 0.0184, Tightening::LB ), Tightening( 12, 0.8678, Tightening::UB ), - Tightening( 13, 0.4151, Tightening::LB ), Tightening( 13, 3.8125, Tightening::UB ), + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - - void test_interval_arithmetic_bound_propagation_bilinear_constraints() + + void test_sbt_abs_positive_and_negative() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; - populateNetworkWithBilinear( nlr ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 12 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -0.1 ); - tableau.setUpperBound( 0, 0.1 ); - tableau.setLowerBound( 1, -0.1 ); - tableau.setUpperBound( 1, 0.1 ); + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, 
-large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - nlr.setTableau( &tableau ); + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); - List expectedBounds( { - Tightening( 2, 0.9, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ), - Tightening( 3, -0.5, Tightening::LB ), Tightening( 3, 0.5, Tightening::UB ), - Tightening( 4, -0.3, Tightening::LB ), Tightening( 4, 0.3, Tightening::UB ), - Tightening( 5, -0.3, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), - - Tightening( 6, -0.55, Tightening::LB ), Tightening( 6, 0.55, Tightening::UB ), - Tightening( 7, -0.09, Tightening::LB ), Tightening( 7, 0.09, Tightening::UB ), + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - 
Tightening( 8, 1.36, Tightening::LB ), Tightening( 8, 2.64, Tightening::UB ), - Tightening( 9, -0.64, Tightening::LB ), Tightening( 9, 0.64, Tightening::UB ), - - Tightening( 10, -1.6896, Tightening::LB ), Tightening( 10, 1.6896, Tightening::UB ), - - Tightening( 11, -1.6896, Tightening::LB ), Tightening( 11, 1.6896, Tightening::UB ), - } ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - // Change the current bounds - tableau.setLowerBound( 0, -0.3 ); - tableau.setUpperBound( 0, 0.1 ); - tableau.setLowerBound( 1, -0.1 ); - tableau.setUpperBound( 1, 0.2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); @@ -5034,59 +8574,114 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - // Initialize + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( 
nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds2( { - Tightening( 2, 0.7, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ), - Tightening( 3, -0.8, Tightening::LB ), Tightening( 3, 0.9, Tightening::UB ), - Tightening( 4, -0.5, Tightening::LB ), Tightening( 4, 0.5, Tightening::UB ), - Tightening( 5, -0.6, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), - - Tightening( 6, -0.88, Tightening::LB ), Tightening( 6, 0.99, Tightening::UB ), - Tightening( 7, -0.3, Tightening::LB ), Tightening( 7, 0.3, Tightening::UB ), + x0: [4, 6] + x1: [1, 5] - Tightening( 8, 0.82, Tightening::LB ), Tightening( 8, 3.29, Tightening::UB ), - Tightening( 9, -1.29, Tightening::LB ), Tightening( 9, 1.18, Tightening::UB ), - - Tightening( 10, -4.2441, Tightening::LB ), Tightening( 10, 3.8822, Tightening::UB ), - - Tightening( 11, -3.8822, Tightening::LB ), Tightening( 11, 4.2441, Tightening::UB ), + Layer 1: + + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is negative, bounds get flipped + Second absolute value is positive, bounds surive the activation + + x4.lb = -2x0 -3x1 + 30 : [3, 19] + x4.ub = -2x0 -3x1 + 30 : [3, 19] + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - 3x0 - 4x1 + 30 : [-8, 14] + x6.ub = - 3x0 - 4x1 + 30 : [-8, 14] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 19, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 14, Tightening::UB ), } ); + List bounds; TS_ASSERT_THROWS_NOTHING( 
nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - void test_interval_arithmetic_bound_propagation_abs_and_relu_constraints() + void test_sbt_absolute_values_positive_and_not_fixed() { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithAbsAndRelu( nlr ); + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; - double large = 1000; tableau.setLowerBound( 2, -large ); 
tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -5097,58 +8692,116 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - nlr.setTableau( &tableau ); + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); - // Initialize + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + x0: [4, 6] + x1: [1, 5] - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Layer 1: - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, 
Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] - Tightening( 10, -5, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + First absolute value is undecided, bounds are concretized. + Second absolute value is active, bounds surive the activation - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + x4 range: [0, 12] + x4.lb = 0 + x4.ub = 12 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 + 12 : [ 1, 7] + + x6 range: [-11, 7] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, 7, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_absolute_values_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 
1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); @@ -5160,66 +8813,114 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - // Initialize + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
+ nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + x0: [4, 6] + x1: [1, 5] - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), + Layer 1: - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + x2 is eliminated, everything set to -3 - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] - Tightening( 10, -10, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + Second absolute value is positive, bounds surive the activation - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), + x4: all set to 3 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 + 3 : [-8, -2] + x6.ub = - x0 - x1 + 3 : [-8, -2] + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, 
Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), } ); - + + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - void test_interval_arithmetic_bound_propagation_round_and_sign_constraints() + void test_sbt_signs_positive_and_not_fixed() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; - populateNetworkWithRoundAndSign( nlr ); - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); - // Initialize the bounds - tableau.setLowerBound( 0, 1.4 ); - tableau.setUpperBound( 0, 1.6 ); - tableau.setLowerBound( 1, -1.4 ); - tableau.setUpperBound( 1, 2.1 ); + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for 
neurons except inputs + double large = 1000000; - double large = 1000; tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -5230,58 +8931,119 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - nlr.setTableau( &tableau ); + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); - // Initialize + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First sign is undecided, bounds are concretized. + Second sign is active, bounds become constant 1 + Coefficient (first Sign, lower): 2/12 = 1/6. + Coefficient (first Sign, upper): -2/-4 = 1/2. 
+ + x4 range: [-1, 1] + x4.lb = 1/6 ( 2x0 + 3x1 - 15 ) - 1 = 2/6 x0 + 3/6 x1 - 21/6 + x4.ub = 1/2 ( 2x0 + 3x1 - 15 ) + 1 = x0 + 1.5x1 - 6.5 + + x5 range: [1, 1] + x5.lb = 1 + x5.ub = 1 + + Layer 2: + + x6.lb = 1 ( 2/6 x0 + 3/6 x1 - 21/6 ) - 1 ( 1 ) = 2/6 x0 + 3/6 x1 - 27/6 : [-16/6, 0] + x6.ub = 1 ( x0 + 1.5x1 - 6.5 ) - 1 ( 1 ) = x0 + 1.5x1 - 7.5 : [-2, 6] + + x6 range: [-8/3, 6] + */ List expectedBounds( { - Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), - Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), - Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - - Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2.6667, Tightening::LB ), + Tightening( 6, 6, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_signs_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 
1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); - Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), - Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), - - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), - } ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - // Change the current bounds - tableau.setLowerBound( 0, -3.1 ); - tableau.setUpperBound( 0, 1.6 ); - tableau.setLowerBound( 1, 1.4 ); - tableau.setUpperBound( 1, 2.1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); @@ -5293,507 +9055,544 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - 
tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - // Initialize + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. + nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds2( { - Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), - Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), - Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), - Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + x0: [4, 6] + x1: [1, 5] - Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + Layer 1: - Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -4, Tightening::LB ), 
Tightening( 13, 4, Tightening::UB ), + x2 is eliminated, everything set to -3 + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First sign is negative, bounds become constant -1 + Second sign is positive, bounds become constant 1 + + x4: all set to -1 + + x5: all set to 1 + + Layer 2: + + x6.lb = 1 ( -1 ) - 1 ( 1 ) = -2 + x6.ub = 1 ( -1 ) - 1 ( 1 ) = -2 + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, -1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - void test_interval_arithmetic_bound_propagation_leaky_relu_and_sigmoid_constraints() + void test_sbt_leaky_relu() { NLR::NetworkLevelReasoner nlr; - populateNetworkWithLeakyReluAndSigmoid( nlr ); - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); + nlr.setTableau( &tableau ); + populateNetworkSBTLeakyReLU( nlr, tableau ); // alpha = 0.2 - // Initialize the bounds tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 2 ); + tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + tableau.setUpperBound( 1, 1 ); - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - 
tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - nlr.setTableau( &tableau ); + /* + Input ranges: - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + x0: [-1, 1] + x1: [-1, 1] - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + Layer 1: - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), - Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), - Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] - Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), - Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), + Both LeakyReLUs are undecided, bounds are concretized. 
+ Coefficient: ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6 + Bias: ( 0.2 - 1 ) * 2 * -2 / ( 2--2 ) = 0.8 - Tightening( 9, 0.0573, Tightening::LB ), Tightening( 9, 0.9999, Tightening::UB ), - Tightening( 11, 0.0219, Tightening::LB ), Tightening( 11, 0.9999, Tightening::UB ), - - Tightening( 12, 0.0573, Tightening::LB ), Tightening( 12, 0.9999, Tightening::UB ), - Tightening( 13, 0.1229, Tightening::LB ), Tightening( 13, 3.9996, Tightening::UB ), - } ); + x4.lb = x0 + x1 + x4.ub = 0.6 ( x0 + x1 ) + 0.8 = 0.6x0 + 0.6x1 + 0.8 + x4 range: [-0.4, 2] + + x5.lb = x0 - x1 + x5.ub = 0.6 ( x0 - x1 ) + 0.8 = 0.6x0 - 0.6x1 + 0.8 + x5 range: [-0.4, 2] + + Layer 2: - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + x6.lb = 1 ( x0 + x1 ) + 1 ( x0 - x1 ) = 2x0 : [-2, 2] + x6.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) + 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2x0 + 1.6 : [0.4, 2.8] + x6 range: [-2, 2.8] + + x7.lb = 1 ( x0 + x1 ) - 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] + x7.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) - 1 ( x0 - x1 ) = -0.4x0 + 1.6x1 + 0.8 : [-1.2, 2.8] + x7 range: [-2.8, 2.8] + + Both LeakyReLUs are undecided, bounds are concretized. 
+ Coefficient (first LeakyReLU): ( 2.8 - 0.2*-2 )/( 2.8--2 ) = 3.2/4.8 = 10/15 + Bias (first LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2 / ( 2.8--2 ) = 14/15 + + Coefficient (second LeakyReLU): ( 2.8 - 0.2*-2.8 )/( 2.8--2.8 ) = 3.36/5.6 = 0.6 + Bias (second LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2.8 / ( 2.8--2.8 ) = 1.12 + + x8.lb = 2x0 + x8.ub = 10/15 ( 1.2x0 + 1.6 ) + 14/15 = 0.8x0 + 2 + x8 range: [-0.4, 2.8] + + x9.lb = 0.4x0 + 1.6x1 - 0.8 + x9.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 + x9 range: [-0.56, 2.8] - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + Layer 3: - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); + x10.lb = 1 ( 0.4x0 + 1.6x1 - 0.8 ) + 1 ( 2x0 ) + 1 = 2.4x0 + 1.6x1 + 0.2 : [-3.8, 5.2] + x10.ub = 1 ( -0.24 x0 + 0.96 x1 + 1.6 ) + 1 ( 0.8x0 + 2 ) + 1 = 0.56x0 + 0.96x1 + 4.6 : [3.08, 6.12] + x10 range: [-3.8, 6.12] + + x11.lb = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] + x11.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 : [0.4, 2.8] + x11 range: [-2.8, 2.8] + + */ - 
// Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + List expectedBounds( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + Tightening( 4, -0.4, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -0.4, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2.8, Tightening::UB ), + Tightening( 7, -2.8, Tightening::LB ), Tightening( 7, 2.8, Tightening::UB ), - Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -0.4, Tightening::LB ), Tightening( 8, 2.8, Tightening::UB ), + Tightening( 9, -0.56, Tightening::LB ), Tightening( 9, 2.8, Tightening::UB ), - Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), - Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), - - Tightening( 9, 0.0323, Tightening::LB ), Tightening( 9, 0.9992, Tightening::UB ), - Tightening( 11, 0.0392, Tightening::LB ), Tightening( 11, 0.9993, Tightening::UB ), + Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 6.12, Tightening::UB ), + Tightening( 11, -2.8, Tightening::LB ), Tightening( 11, 2.8, Tightening::UB ) - Tightening( 12, 0.0323, Tightening::LB ), Tightening( 12, 0.9992, Tightening::UB ), - Tightening( 13, 0.1498, Tightening::LB ), Tightening( 13, 3.9972, Tightening::UB ), - } ); + } ); + List bounds; 
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - - void test_interval_arithmetic_bound_propagation_softmax_and_max_constraints() + + void test_sbt_sigmoids_and_round() { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithSoftmaxAndMax( nlr ); - + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 12 ); + nlr.setTableau( &tableau ); + populateNetworkSBTSigmoidsAndRound( nlr, tableau ); - // Initialize the bounds tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, -1 ); tableau.setUpperBound( 1, 1 ); - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - nlr.setTableau( &tableau ); + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + // Layer 1 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 0 ), -2, 0.00001 ) ); + 
TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 0 ), 2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 1 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 1 ), 2, 0.00001 ) ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + // Layer 2 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 0 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 0 ), 0.8807, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 1 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 1 ), 0.8807, 0.0001 ) ); - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), - Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), + // Layer 3 + /* + Double-check with Python + --- + from math import exp as e + def g(x): + return 1 / (1 + e(-x)) - Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), - Tightening( 9, 0.3192, Tightening::LB ), Tightening( 9, 2.9819, Tightening::UB ), - - Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), - - Tightening( 11, -2.9819, Tightening::LB ), Tightening( 11, -0.3192, Tightening::UB ) - } ); + def g_prime(x): + return g(x) * (1 - g(x)) - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + def lam(l, u): + return (g(u) - g(l)) / (u - l) - // Change the current bounds - tableau.setLowerBound( 0, -3 ); + def lam_prime(l, 
u): + return min(g_prime(l), g_prime(u)) + + l3 = l4 = -2 + u3 = u4 = 2 + l5 = l6 = g(-2) + u5 = u6 = g(2) + lambda7 = lam(l3, u3) + lambda7_prime = lam_prime(l3, u3) + lambda8 = lam(l4, u4) + lambda8_prime = lam_prime(l4, u4) + x7_l = lambda7_prime * (-2) + g(-2) + g(-2) - lambda7_prime * (-2 + -2) + x7_u = lambda7_prime * (2) + g(2) + g(2) -lambda7_prime * (2 + 2) + x8_l = lambda8_prime * (-2) + g(-2) - g(2) - lambda8_prime * (-2 - 2) + x8_u = lambda8_prime * (2) + g(2) - g(-2) -lambda8_prime * (2 - -2) + print(x7_l) + print(x7_u) + print(x8_l) + print(x8_u) + --- + [output]: + 0.4483930148512481 + 1.5516069851487517 + -0.5516069851487517 + 0.5516069851487517 + */ + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 0 ), 0.4483, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 0 ), 1.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 1 ), -0.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 1 ), 0.5516, 0.0001 ) ); + + // Layer 4 + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 0 ), 0 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 0 ), 2 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 1 ), -1 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 1 ), 1 ); + } + + void test_sbt_max_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTMax( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, -1 ); tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - 
tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - // Initialize + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + /* + Input ranges: - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + x0: [-1, 1] + x1: [-1, 2] + + Layer 1: + + x2.lb = x0 + x1 : [-2, 3] + x2.ub = x0 + x1 : [-2, 3] + + x3.lb = x0 - x1 : [-3, 2] + x3.ub = x0 - x1 : [-3, 2] + + Both ReLUs are undecided, bounds are concretized. + Coefficient (first ReLU): 3/( 3--2 ) = 3/5 = 0.6 + Coefficient (second ReLU): 2/( 2--3 ) = 2/5 = 0.4 + + x4.lb = 0.6 ( x0 + x1 ) = 0.6x0 + 0.6x1 + x4.ub = 0.6 ( x0 + x1 ) + 1.2 = 0.6x0 + 0.6x1 + 1.2 + x4 range: [0, 3] + + x5.lb = 0.4 ( x0 - x1 ) = 0.4x0 + 0.4x1 + x5.ub = 0.4 ( x0 - x1 ) + 1.2 = 0.4x0 + 0.4x1 + 1.2 + x5 range: [0, 2] + + Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub + Max inherits lower bound from x4, and its upper bound is constant 3. 
+ + x6.lb = 0.6x0 + 0.6x1 : [-1.2, 1.8] + x6.ub = 3 : [3, 3] + x6 range: [-1.2, 3] + + Layer 3: + + x7.lb = 2 ( 0.6x0 + 0.6x1 ) = 1.2x0 + 1.8x1 : [-2.4, 3.6] + x7.ub = 2 ( 3 ) = 6 : [6, 6] + x7 range: [-2.4, 6] + */ + + List expectedBounds( { + Tightening( 2, -2, Tightening::LB ), + Tightening( 2, 3, Tightening::UB ), + Tightening( 3, -3, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), + Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -2.4, Tightening::LB ), + Tightening( 7, 6, Tightening::UB ), - Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), - Tightening( 9, 0.0654, Tightening::LB ), Tightening( 9, 2.9933, Tightening::UB ), - - Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ), - - Tightening( 11, -2.9933, Tightening::LB ), Tightening( 11, -0.0654, Tightening::UB ) } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - - void test_interval_arithmetic_bound_propagation_relu_and_bilinear_constraints() + + void test_sbt_max_fixed() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + NLR::NetworkLevelReasoner nlr; - populateNetworkWithReluAndBilinear( nlr ); - MockTableau tableau; - tableau.getBoundManager().initialize( 12 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, 
large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - nlr.setTableau( &tableau ); + populateNetworkSBTMax( nlr, tableau ); - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -3 ); + tableau.setUpperBound( 1, -2 ); - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + /* + Input ranges: - Tightening( 10, -7, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), - - Tightening( 11, -49, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), - } ); + x0: [1, 2] + x1: [-3, -2] - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + Layer 1: - 
TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + x2.lb = x0 + x1 : [-2, 0] + x2.ub = x0 + x1 : [-2, 0] - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + x3.lb = x0 - x1 : [3, 5] + x3.ub = x0 - x1 : [3, 5] - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); + First ReLU is negative, bounds become constant 0 + Second ReLU is positive, bounds survive the activation - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + x4: all set to 0 + + x5.lb = x0 - x1 : [3, 5] + x5.ub = x0 - x1 : [3, 5] + + Max is fixed because x5.lb > x4.ub, it inherits x5's bounds + + x6.lb = x0 - x1 : [3, 5] + x6.ub = x0 - x1 : [3, 5] - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + Layer 3: - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, 0, 
Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + x7.lb = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] + x7.ub = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] + */ - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, -2, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + List expectedBounds( { + Tightening( 2, -2, Tightening::LB ), + Tightening( 2, 0, Tightening::UB ), + Tightening( 3, 3, Tightening::LB ), + Tightening( 3, 5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 3, Tightening::LB ), + Tightening( 5, 5, Tightening::UB ), + Tightening( 6, 3, Tightening::LB ), + Tightening( 6, 5, Tightening::UB ), + Tightening( 7, 6, Tightening::LB ), + Tightening( 7, 10, Tightening::UB ), - Tightening( 10, -14, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), - - Tightening( 11, -49, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - - void test_sbt_relus_all_active() + + void test_sbt_softmax1() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkSBTRelu( nlr, tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + } - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] + void 
test_sbt_softmax2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + { + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); - Layer 1: + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); - x2.lb = 2x0 + 3x1 : [11, 27] - x2.ub = 2x0 + 3x1 : [11, 27] + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] + /* + Input ranges: - Both ReLUs active, bound survive through activations: + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + */ + List expectedBounds( { Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, 0.2595, Tightening::LB ), + Tightening( 6, 0.2595, Tightening::UB ), + Tightening( 7, 0.7054, Tightening::LB ), + Tightening( 7, 0.7054, Tightening::UB ), + Tightening( 8, 0.0351, Tightening::LB ), + Tightening( 8, 0.0351, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), + Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), + Tightening( 10, -1, Tightening::UB ) - x4.lb = 2x0 + 3x1 : [11, 27] - x4.ub = 2x0 + 3x1 : [11, 27] + } ); - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + { + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "er" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + 
nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); - Layer 2: + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); - x6.lb = x0 + 2x1 : [6, 16] - x6.ub = x0 + 2x1 : [6, 16] - */ + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - List expectedBounds( { - Tightening( 2, 11, Tightening::LB ), - Tightening( 2, 27, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + /* + Input ranges: - Tightening( 4, 11, Tightening::LB ), - Tightening( 4, 27, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + */ + List expectedBounds( { Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, 0.2595, Tightening::LB ), + Tightening( 6, 0.2595, Tightening::UB ), + Tightening( 7, 0.7054, Tightening::LB ), + Tightening( 7, 0.7054, Tightening::UB ), + Tightening( 8, 0.0351, Tightening::LB ), + Tightening( 8, 0.0351, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), + Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), + Tightening( 10, -1, Tightening::UB ) - Tightening( 6, 6, Tightening::LB ), - Tightening( 6, 16, Tightening::UB ), - } ); + } ); - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } } - void test_sbt_relus_active_and_inactive() 
+ void test_sbt_softmax3() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkSBTRelu( nlr, tableau ); + populateNetworkSBTSoftmax2( nlr, tableau ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.00001 ); tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -30 ); + tableau.setUpperBound( 1, 1.00001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.00001 ); // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -5802,45 +9601,40 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite /* Input ranges: - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 30 : [-19, -3] - x2.ub = 2x0 + 3x1 - 30 : [-19, -3] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First ReLU is inactive, bounds get zeroed - Second ReLU is active, bounds surive the activation - - x4.lb = 0 - x4.ub = 0 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 : [-11, -5] + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] */ - List expectedBounds( { - Tightening( 2, -19, Tightening::LB ), - Tightening( 2, -3, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 0, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), + List expectedBounds({ Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), + Tightening( 6, -1, 
Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), + Tightening( 7, -2, Tightening::UB ), + Tightening( 8, 0.8668, Tightening::LB ), + Tightening( 8, 0.8668, Tightening::UB ), + Tightening( 9, 0.9820, Tightening::LB ), + Tightening( 9, 0.9820, Tightening::UB ), + Tightening( 10, 0.1173, Tightening::LB ), + Tightening( 10, 0.1173, Tightening::UB ), + Tightening( 11, 0.0179, Tightening::LB ), + Tightening( 11, 0.0179, Tightening::UB ), + Tightening( 12, 0.0159, Tightening::LB ), + Tightening( 12, 0.0159, Tightening::UB ), + Tightening( 13, 0.9470, Tightening::LB ), + Tightening( 13, 0.9470, Tightening::UB ), + Tightening( 14, -0.9470, Tightening::LB ), + Tightening( 14, -0.9470, Tightening::UB ), + Tightening( 15, 1.0253, Tightening::LB ), + Tightening( 15, 1.0253, Tightening::UB ), + Tightening( 16, -1.0253, Tightening::LB ), + Tightening( 16, -1.0253, Tightening::UB ) - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, -5, Tightening::UB ), } ); List bounds; @@ -5848,99 +9642,64 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - void test_sbt_relus_active_and_not_fixed() + void test_softmax_bounds_er() { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTRelu( nlr, tableau ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -15 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First ReLU is undecided, bound 
is concretized. - Coefficient: 12/(12--4) = 12/16 = 0.75 - Second ReLU is active, bounds surive the activation - - x4 range: [0, 12] - x4.lb = 0.75( 2x0 + 3x1 ) - 0.75 * 15 = 1.5x0 + 2.25x1 - 11.25 - x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = 0.5x0 + 1.25x1 - 11.25 - x6.ub = 0.5x0 + 1.25x1 - 8.25 - - x6 range: [2 + 1.25 - 11.25 = -8, 3 + 6.25 - 8.25 = 1] = [-8, 1] - */ + Vector inputLb = { -1, 0, 1 }; + Vector inputUb = { 0, 2, 4 }; + Vector input = { -0.5, 1, 2.5 }; - List expectedBounds( { - Tightening( 2, -4, Tightening::LB ), - Tightening( 2, 12, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + double value = NLR::DeepPolySoftmaxElement::ERLowerBound( input, inputLb, inputUb, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.0114799, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.00563867, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 1 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.000838421, 0.00001 ) ); - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 12, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, 1, Tightening::UB ), - } ); + Vector outputLb = { 0.2, 0, 0 }; + Vector outputUb = { 0.4, 0.1, 0.1 }; - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + value = NLR::DeepPolySoftmaxElement::ERUpperBound( input, outputLb, outputUb, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, -1.44538, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 1.96538, 0.00001 ) ); + value = 
NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 1 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.358535, 0.00001 ) ); } - void test_sbt_relus_active_and_externally_fixed() + void test_softmax_bounds_lse1() { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Vector inputLb = { -1, 0, 1 }; + Vector inputUb = { 0, 2, 3 }; + Vector input = { -0.5, 1, 2 }; + double value = NLR::DeepPolySoftmaxElement::LSELowerBound( input, inputLb, inputUb, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); + value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); + value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 1 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.00703444, 0.001 ) ); + Vector outputLb = { 0.2, 0, 0 }; + Vector outputUb = { 0.4, 0.1, 0.1 }; + value = NLR::DeepPolySoftmaxElement::LSEUpperBound( input, outputLb, outputUb, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.164165, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.272204, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 1 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.073207, 0.00001 ) ); + } + + void test_sbt_bilinear() + { NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkSBTRelu( nlr, tableau ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
- nlr.setBias( 1, 0, -15 ); + populateNetworkSBTBilinear( nlr, tableau ); - // However, one of the ReLU's variables has been eliminated - nlr.eliminateVariable( 2, -3 ); + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -2 ); + tableau.setUpperBound( 1, 1 ); // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); @@ -5949,350 +9708,238 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite /* Input ranges: - x0: [4, 6] - x1: [1, 5] - + x0: [1, 2] + x1: [-2, 1] + Layer 1: - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] + x2.lb = x0 - 2x1 : [-1, 6] + x2.ub = x0 - 2x1 : [-1, 6] - First ReLU is inactive (set externally), bounds get zeroed - Second ReLU is active, bounds surive the activation + x3.lb = x0 + x1 : [-1, 3] + x3.ub = x0 + x1 : [-1, 3] - x4.lb = 0 - x4.ub = 0 + Coefficients for bilinear layer: + Lower bound: + alpha_l = x3.lb = -1 + beta = x2.lb = -1 + gamma_l = -x2.lb * x3.lb = --1 * -1 = -1 + + Upper bound: + alpha_u = x3.ub = 3 + beta = x2.lb = -1 + gamma_u = -x2.lb * x3.ub = --1 * 3 = 3 - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] + x4.lb = -1 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + -1 = -2x0 + x1 - 1 : [-7, -2] + x4.ub = 3 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + 3 = 2x0 - 7x1 + 3 : [-2, 21] + x4 range: [-7, 21] - Layer 2: + Layer 3: - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 : [-11, -5] + x5.lb = -1 ( 2x0 - 7x1 + 3 ) = -2x0 + 7x1 - 3 : [-21, 2] + x5.ub = -1 ( -2x0 + x1 - 1 ) = 2x0 - x1 + 1 : [2, 7] + x5 range: [-21, 7] */ - List expectedBounds( { - // x2 does not appear, because it has been eliminated - - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 0, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, -5, 
Tightening::UB ), - } ); + List expectedBounds( { Tightening( 2, -1, Tightening::LB ), + Tightening( 2, 6, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 3, Tightening::UB ), + Tightening( 4, -7, Tightening::LB ), + Tightening( 4, 21, Tightening::UB ), + Tightening( 5, -21, Tightening::LB ), + Tightening( 5, 7, Tightening::UB ) } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - void test_sbt_relu_residual1() + void test_concretize_input_assignment() { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkSBTReluResidual1( nlr, tableau ); - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); + populateNetwork( nlr ); - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + // With ReLUs, Inputs are zeros, only biases count + tableau.nextValues[0] = 0; + tableau.nextValues[1] = 0; - /* - Input ranges: + Map assignment; - x0: [-1, 1] + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); - Layer 1: + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 4 ) ); - x1.lb = x0 : [-1, 1] - x1.ub = x0 : [-1, 1] + TS_ASSERT( assignment.size() == 14 ); + TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( assignment[13], 4 ) ); - ReLU is undecided, bound is concretized. 
- Coefficient: 1/( 1--1 ) = 1/2 = 0.5 - - x2.lb = 0.5x0 - x2.ub = 0.5x0 + 0.5 - x2 range: [0, 1] - - Layer 2 (with residual from x0): + // With ReLUs, case 1 + tableau.nextValues[0] = 1; + tableau.nextValues[1] = 1; - x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1] - x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] - x3 range: [-1, 2.5] - - ReLU is undecided, bound is concretized. - Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. - - x4.lb = 0 - x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] - x4 range: [0, 2.5] + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); - Layer 3 (with residual from x1): + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 1 ) ); - x5.lb = 3 ( 0 ) + 3 ( x0 ) + 1 = 3x0 + 1 : [-2, 4] - x5.ub = 3 ( -15/14 x0 + 20/14 ) + 3 ( x0 ) + 1 = -3/14 x0 + 74/14 : [71/14, 77/14 = 5.5] + TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( assignment[13], 1 ) ); - x5 range: [-2, 4] - */ + // With ReLUs, case 2 + tableau.nextValues[0] = 1; + tableau.nextValues[1] = 2; - List expectedBounds( { - Tightening( 1, -1, Tightening::LB ), - Tightening( 1, 1, Tightening::UB ), - Tightening( 2, 0, Tightening::LB ), - Tightening( 2, 1, Tightening::UB ), - Tightening( 3, -1, Tightening::LB ), - Tightening( 3, 2.5, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 2.5, Tightening::UB ), - Tightening( 5, 2, Tightening::LB ), - Tightening( 5, 5.5, Tightening::UB ), - } ); + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 0 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 0 ) ); + + 
TS_ASSERT( FloatUtils::areEqual( assignment[12], 0 ) ); + TS_ASSERT( FloatUtils::areEqual( assignment[13], 0 ) ); } - - void test_sbt_relu_residual2() + + + void test_obtain_bound_from_ipq() { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTReluResidual2( nlr, tableau ); + populateNetwork( nlr ); - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); + Query query; + query.setNumberOfVariables( 14 ); - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - /* - Input ranges: + // Initialize the bounds + query.setLowerBound( 0, -1 ); + query.setUpperBound( 0, 1 ); + query.setLowerBound( 1, -1 ); + query.setUpperBound( 1, 1 ); - x0: [-1, 1] + double large = 1000; + query.setLowerBound( 2, -large ); + query.setUpperBound( 2, large ); + query.setLowerBound( 3, -large ); + query.setUpperBound( 3, large ); + query.setLowerBound( 4, -large ); + query.setUpperBound( 4, large ); + query.setLowerBound( 5, -large ); + query.setUpperBound( 5, large ); + query.setLowerBound( 6, -large ); + query.setUpperBound( 6, large ); + query.setLowerBound( 7, -large ); + query.setUpperBound( 7, large ); + query.setLowerBound( 8, -large ); + query.setUpperBound( 8, large ); + query.setLowerBound( 9, -large ); + query.setUpperBound( 9, large ); + query.setLowerBound( 10, -large ); + query.setUpperBound( 10, large ); + query.setLowerBound( 11, -large ); + query.setUpperBound( 11, large ); + query.setLowerBound( 12, -large ); + query.setUpperBound( 12, large ); + query.setLowerBound( 13, -large ); + query.setUpperBound( 13, large ); - Layer 1: + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds( query ) ); - x1.lb = x0 : [-1, 1] - x1.ub = x0 : [-1, 1] + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() 
); - ReLU is undecided, bound is concretized. - Coefficient: 1/( 1--1 ) = 1/2 = 0.5 - - x2.lb = 0.5x0 - x2.ub = 0.5x0 + 0.5 - x2 range: [0, 1] - - Layer 2 (with residual from x0): + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1] - x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] - x3 range: [-1, 2.5] - - ReLU is undecided, bound is concretized. - Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. - - x4.lb = 0 - x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] - x4 range: [0, 2.5] + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Layer 3 (with residual from x0): + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - x5.lb = 3 ( 0 ) + 1 ( x0 ) + 1 = 1x0 + 1 : [0, 2] - x5.ub = 3 ( -15/14 x0 + 20/14 ) + 1 ( x0 ) + 1 = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5] - x5 range: [0, 7.5] - - Layer 4: - x6.lb = 1x0 + 1 : [0, 2] - x6.ub = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5] - x6 range: [0, 7.5] - */ + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - List expectedBounds( { - Tightening( 1, -1, Tightening::LB ), - Tightening( 1, 1, Tightening::UB ), - Tightening( 2, 0, Tightening::LB ), - Tightening( 2, 1, Tightening::UB ), - Tightening( 3, -1, Tightening::LB ), - Tightening( 3, 2.5, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 2.5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), - Tightening( 5, 7.5, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), - Tightening( 6, 7.5, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), 
Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : expectedBounds ) + TS_ASSERT( bounds.exists( bound ) ); } - void test_sbt_relu_reindex() + void test_backwards_relu() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkSBTReluReindex( nlr, tableau ); + populateNetworkBackwardReLU( nlr, tableau ); - tableau.setLowerBound( 0, -1 ); + tableau.setLowerBound( 0, 0 ); tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); + tableau.setLowerBound( 1, 0 ); tableau.setUpperBound( 1, 1 ); - // Invoke SBT + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [-1, 1] - x1: [-1, 1] - - Layer 1: - - x2.lb = x0 + x1 : [-2, 2] - x2.ub = x0 + x1 : [-2, 2] - - x3.lb = x0 - x1 : [-2, 2] - x3.ub = x0 - x1 : [-2, 2] - - Both ReLUs are undecided, bounds are concretized. 
- Coefficient: 2/( 2--2 ) = 2/4 = 0.5 - - x4.lb = 0.5 ( x0 + x1 ) = 0.5x0 + 0.5x1 - x4.ub = 0.5 ( x0 + x1 ) + 1 = 0.5x0 + 0.5x1 + 1 - x4 range: [0, 2] - - x5.lb = 0.5 ( x0 - x1 ) = 0.5x0 - 0.5x1 - x5.ub = 0.5 ( x0 - x1 ) + 1 = 0.5x0 - 0.5x1 + 1 - x5 range: [0, 2] - - Layer 2: - - x6.lb = 1 ( 0.5x0 + 0.5x1 ) + 1 ( 0.5x0 - 0.5x1 ) = x0 : [-1, 1] - x6.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) + 1 ( 0.5x0 - 0.5x1 + 1 ) = x0 + 2 : [1, 3] - x6 range: [-1, 3] - - x7.lb = 1 ( 0.5x0 + 0.5x1 ) - 1 ( 0.5x0 - 0.5x1 + 1 ) = x1 - 1 : [-2, 0] - x7.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) - 1 ( 0.5x0 - 0.5x1 ) = x1 + 1 : [0, 2] - x7 range: [-2, 2] - - Both ReLUs are undecided, bounds are concretized. - Coefficient (first ReLU, lower): 1/( 1--1 ) = 1/2 = 0.5 - Coefficient (first ReLU, upper): 1 (propagated as is) - Coefficient (second ReLU, lower): 0 (bound is zero) - Coefficient (second ReLU, upper): 2/( 2--2 ) = 2/4 = 0.5 - - x8.lb = 0.5 ( x0 ) = 0.5x0 - x8.ub = x0 + 2 - x8 range: [0, 3] - - x9.lb = 0 - x9.ub = 0.5 ( x1 + 1 ) + 1 = 0.5x1 + 1.5 - x9 range: [0, 2] - - Layer 3: - - x10.lb = 1 ( 0.5x0 ) + 1 ( 0 ) + 1 = 0.5x0 + 1 : [0.5, 1.5] - x10.ub = 1 ( x0 + 2 ) + 1 ( 0.5x1 + 1.5 ) + 1 = x0 + 0.5x1 + 4.5 : [3, 6] - x10 range: [0.5, 6] - - x11.lb = 0.5x1 - 0.5 - x11.ub = 0.5x1 + 1.5 - x11 range: [0, 2] - - */ - + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + List expectedBounds( - { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -2, 
Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 10, 0.5, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 2, Tightening::UB ) + Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), + Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), } ); - + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_abs_all_positive() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 
0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { Tightening( 8, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); - // Very loose bounds for neurons except inputs double large = 1000000; - tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -6303,112 +9950,138 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - // Invoke SBT + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - 
- x2.lb = 2x0 + 3x1 : [11, 27] - x2.ub = 2x0 + 3x1 : [11, 27] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - Both absolute values positive, bound survive through activations: - - x4.lb = 2x0 + 3x1 : [11, 27] - x4.ub = 2x0 + 3x1 : [11, 27] - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = x0 + 2x1 : [6, 16] - x6.ub = x0 + 2x1 : [6, 16] - */ + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - List expectedBounds( { - Tightening( 2, 11, Tightening::LB ), - Tightening( 2, 27, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 4, 11, Tightening::LB ), - Tightening( 4, 27, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), - Tightening( 6, 6, Tightening::LB ), - Tightening( 6, 16, Tightening::UB ), - } ); + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), - List bounds; + Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { Tightening( 8, 0, Tightening::LB ), + Tightening( 9, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - - void test_sbt_abs_positive_and_negative() + + void test_backwards_relu2() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); nlr.setTableau( &tableau ); + populateNetworkBackwardReLU2( nlr, tableau ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + // Invoke DeepPoly + 
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + List expectedBounds( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), + + Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), + Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), + + Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), + + Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), + + Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + 
TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 19, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - // Very loose bounds for neurons except inputs - double large = 1000000; + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); + double large = 1000000; tableau.setLowerBound( 3, -large ); tableau.setUpperBound( 3, large ); tableau.setLowerBound( 4, -large ); @@ -6417,114 +10090,155 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -30 ); - - // Invoke SBT + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + 
tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 30 : [-19, -3] - x2.ub = 2x0 + 3x1 - 30 : [-19, -3] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First absolute value is negative, bounds get flipped - Second absolute value is positive, bounds surive the activation - - x4.lb = -2x0 -3x1 + 30 : [3, 19] - x4.ub = -2x0 -3x1 + 30 : [3, 19] - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - 3x0 - 4x1 + 30 : [-8, 14] - x6.ub = - 3x0 - 4x1 + 30 : [-8, 14] - */ - - List expectedBounds( { - Tightening( 2, -19, Tightening::LB ), - Tightening( 2, -3, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 19, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, 14, Tightening::UB ), - } ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), 
+ + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), + Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), + Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), + + Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), + Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), + Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), + + Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { Tightening( 7, 0, Tightening::LB ), - List bounds; + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 20, 0, Tightening::LB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + TS_ASSERT( boundsEqual( bounds, 
expectedBounds4 ) ); } - void test_sbt_absolute_values_positive_and_not_fixed() + void test_backwards_sigmoid() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); nlr.setTableau( &tableau ); + populateNetworkBackwardSigmoid( nlr, tableau ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( + { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ), + Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + Tightening( 6, -0.1261, 
Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ), + Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ), + Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ), + Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); - // Very loose bounds for neurons except inputs double large = 1000000; - tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -6535,117 +10249,284 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); 
+ tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -15 ); - // Invoke SBT + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First absolute value is undecided, bounds are concretized. - Second absolute value is active, bounds surive the activation - - x4 range: [0, 12] - x4.lb = 0 - x4.ub = 12 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Layer 2: + Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ), + Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ), - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 + 12 : [ 1, 7] + Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ), + Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ), - x6 range: [-11, 7] - */ + Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ), + Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ), - List expectedBounds( { - Tightening( 2, -4, Tightening::LB ), - Tightening( 2, 12, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, 
-0.8841, Tightening::UB ), + Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_sigmoid2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSigmoid2( nlr, tableau ); - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 12, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, 7, Tightening::UB ), - } ); + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), + Tightening( 8, 
0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ), + Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ), + Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ), + + Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ), + Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ), + Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ), + + Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ), + Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ), + Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ), + + Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ), + Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ), + + Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ), + Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ), + + Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ), + } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); - void 
test_sbt_absolute_values_active_and_externally_fixed() + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + 
+ Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ), + Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ), + Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ), + + Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ), + Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ), + Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ), + + Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ), + Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ), + Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ), + + Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ), + Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ), + + Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ), + Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ), + + Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_abs() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + Options::get()->setString( 
Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); nlr.setTableau( &tableau ); + populateNetworkBackwardAbs( nlr, tableau ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( + { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + 
Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); - // Very loose bounds for neurons except inputs double large = 1000000; - tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -6656,116 +10537,133 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node 
(1,0). Should make the node unfixed. - nlr.setBias( 1, 0, -15 ); - - // However, the weighted sum variable has been eliminated - nlr.eliminateVariable( 2, -3 ); - // Invoke SBT + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2 is eliminated, everything set to -3 - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - Second absolute value is positive, bounds surive the activation - - x4: all set to 3 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 + 3 : [-8, -2] - x6.ub = - x0 - x1 + 3 : [-8, -2] - */ - - List expectedBounds( { - // x2 does not appear, because it has been eliminated + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ), - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), + Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ), - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, -2, Tightening::UB ), - } ); + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ), - List bounds; + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ), + } ); + 
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - void test_sbt_signs_positive_and_not_fixed() + void test_backwards_abs2() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); nlr.setTableau( &tableau ); + populateNetworkBackwardAbs2( nlr, tableau ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); - 
nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + 
TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); - // Very loose bounds for neurons except inputs double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); tableau.setUpperBound( 3, large ); tableau.setLowerBound( 4, -large ); @@ -6774,120 +10672,149 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -15 ); - - // Invoke SBT + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 
15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First sign is undecided, bounds are concretized. - Second sign is active, bounds become constant 1 - Coefficient (first Sign, lower): 2/12 = 1/6. - Coefficient (first Sign, upper): -2/-4 = 1/2. 
- - x4 range: [-1, 1] - x4.lb = 1/6 ( 2x0 + 3x1 - 15 ) - 1 = 2/6 x0 + 3/6 x1 - 21/6 - x4.ub = 1/2 ( 2x0 + 3x1 - 15 ) + 1 = x0 + 1.5x1 - 6.5 - - x5 range: [1, 1] - x5.lb = 1 - x5.ub = 1 - - Layer 2: - - x6.lb = 1 ( 2/6 x0 + 3/6 x1 - 21/6 ) - 1 ( 1 ) = 2/6 x0 + 3/6 x1 - 27/6 : [-16/6, 0] - x6.ub = 1 ( x0 + 1.5x1 - 6.5 ) - 1 ( 1 ) = x0 + 1.5x1 - 7.5 : [-2, 6] - - x6 range: [-8/3, 6] - */ - - List expectedBounds( { - Tightening( 2, -4, Tightening::LB ), - Tightening( 2, 12, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), - Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), - Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, -2.6667, Tightening::LB ), - Tightening( 6, 6, Tightening::UB ), - } ); - - List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ), + + 
Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ), + + Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - - void test_sbt_signs_active_and_externally_fixed() + + void test_backwards_round() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); nlr.setTableau( &tableau ); + populateNetworkBackwardRound( nlr, tableau ); - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); 
- nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( + { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { Tightening( 11, -4, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + } ); + + 
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); - // Very loose bounds for neurons except inputs double large = 1000000; - tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); @@ -6898,414 +10825,650 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 5, large ); tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
- nlr.setBias( 1, 0, -15 ); - - // However, the weighted sum variable has been eliminated - nlr.eliminateVariable( 2, -3 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2 is eliminated, everything set to -3 - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First sign is negative, bounds become constant -1 - Second sign is positive, bounds become constant 1 - - x4: all set to -1 - - x5: all set to 1 - - Layer 2: - - x6.lb = 1 ( -1 ) - 1 ( 1 ) = -2 - x6.ub = 1 ( -1 ) - 1 ( 1 ) = -2 - */ - - List expectedBounds( { - // x2 does not appear, because it has been eliminated - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 4, -1, Tightening::LB ), - Tightening( 4, -1, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), - Tightening( 5, 1, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -2, Tightening::LB ), - Tightening( 6, -2, Tightening::UB ), - } ); + Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ), - List bounds; + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, 
Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { Tightening( 7, -10, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + + Tightening( 11, -14, Tightening::LB ), Tightening( 11, 11, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - void test_sbt_leaky_relu() + void test_backwards_round2() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkSBTLeakyReLU( nlr, tableau ); // alpha = 0.2 + populateNetworkBackwardRound2( nlr, tableau ); tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, -1 ); tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [-1, 1] - x1: [-1, 1] - - Layer 1: - - x2.lb = x0 + x1 : [-2, 2] - x2.ub = x0 + x1 : [-2, 2] - - x3.lb = x0 - x1 : [-2, 2] - x3.ub = x0 - x1 : [-2, 2] - - Both LeakyReLUs are undecided, bounds are concretized. 
- Coefficient: ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6 - Bias: ( 0.2 - 1 ) * 2 * -2 / ( 2--2 ) = 0.8 - - x4.lb = x0 + x1 - x4.ub = 0.6 ( x0 + x1 ) + 0.8 = 0.6x0 + 0.6x1 + 0.8 - x4 range: [-0.4, 2] - - x5.lb = x0 - x1 - x5.ub = 0.6 ( x0 - x1 ) + 0.8 = 0.6x0 - 0.6x1 + 0.8 - x5 range: [-0.4, 2] - - Layer 2: - - x6.lb = 1 ( x0 + x1 ) + 1 ( x0 - x1 ) = 2x0 : [-2, 2] - x6.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) + 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2x0 + 1.6 : [0.4, 2.8] - x6 range: [-2, 2.8] - - x7.lb = 1 ( x0 + x1 ) - 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] - x7.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) - 1 ( x0 - x1 ) = -0.4x0 + 1.6x1 + 0.8 : [-1.2, 2.8] - x7 range: [-2.8, 2.8] - - Both LeakyReLUs are undecided, bounds are concretized. - Coefficient (first LeakyReLU): ( 2.8 - 0.2*-2 )/( 2.8--2 ) = 3.2/4.8 = 10/15 - Bias (first LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2 / ( 2.8--2 ) = 14/15 - - Coefficient (second LeakyReLU): ( 2.8 - 0.2*-2.8 )/( 2.8--2.8 ) = 3.36/5.6 = 0.6 - Bias (second LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2.8 / ( 2.8--2.8 ) = 1.12 - - x8.lb = 2x0 - x8.ub = 10/15 ( 1.2x0 + 1.6 ) + 14/15 = 0.8x0 + 2 - x8 range: [-0.4, 2.8] - - x9.lb = 0.4x0 + 1.6x1 - 0.8 - x9.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 - x9 range: [-0.56, 2.8] - - Layer 3: - - x10.lb = 1 ( 0.4x0 + 1.6x1 - 0.8 ) + 1 ( 2x0 ) + 1 = 2.4x0 + 1.6x1 + 0.2 : [-3.8, 5.2] - x10.ub = 1 ( -0.24 x0 + 0.96 x1 + 1.6 ) + 1 ( 0.8x0 + 2 ) + 1 = 0.56x0 + 0.96x1 + 4.6 : [3.08, 6.12] - x10 range: [-3.8, 6.12] - - x11.lb = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] - x11.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 : [0.4, 2.8] - x11 range: [-2.8, 2.8] - - */ + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + List expectedBounds( - { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 
-0.4, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -0.4, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2.8, Tightening::UB ), - Tightening( 7, -2.8, Tightening::LB ), Tightening( 7, 2.8, Tightening::UB ), - - Tightening( 8, -0.4, Tightening::LB ), Tightening( 8, 2.8, Tightening::UB ), - Tightening( 9, -0.56, Tightening::LB ), Tightening( 9, 2.8, Tightening::UB ), - - Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 6.12, Tightening::UB ), - Tightening( 11, -2.8, Tightening::LB ), Tightening( 11, 2.8, Tightening::UB ) - + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ), + Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ), + Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ), + + Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ), + Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ), + + Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ), + Tightening( 20, -17, Tightening::LB ), 
Tightening( 20, 17, Tightening::UB ), + + Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ), } ); - + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_sigmoids_and_round() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTSigmoidsAndRound( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); - // Invoke SBT + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 
12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ), + Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ), + Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, -9, Tightening::LB ), 
Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ), + Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ), + + Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - // Layer 1 - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 0 ), -2, 0.00001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 0 ), 2, 0.00001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 1 ), -2, 0.00001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 1 ), 2, 0.00001 ) ); - - // Layer 2 - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 0 ), 0.1192, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 0 ), 0.8807, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 1 ), 0.1192, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 1 ), 0.8807, 0.0001 ) ); - - // Layer 3 - /* - Double-check with Python - --- - from math import exp as e - def g(x): - return 1 / (1 + e(-x)) - - def g_prime(x): - return g(x) * (1 - g(x)) - - def lam(l, u): - return (g(u) - g(l)) / (u - l) - - def lam_prime(l, u): - return min(g_prime(l), g_prime(u)) - - l3 = l4 = -2 - u3 = u4 = 2 - l5 = l6 = g(-2) - u5 = u6 = g(2) - lambda7 = lam(l3, u3) - lambda7_prime = lam_prime(l3, u3) - lambda8 = lam(l4, u4) - lambda8_prime = lam_prime(l4, u4) - x7_l = lambda7_prime * (-2) + g(-2) + g(-2) - lambda7_prime * (-2 + -2) - x7_u = lambda7_prime * (2) + g(2) + g(2) -lambda7_prime * (2 + 2) - x8_l = lambda8_prime * (-2) + g(-2) - g(2) - lambda8_prime * (-2 - 2) - x8_u = lambda8_prime * (2) + g(2) - g(-2) -lambda8_prime * (2 - -2) - print(x7_l) - 
print(x7_u) - print(x8_l) - print(x8_u) - --- - [output]: - 0.4483930148512481 - 1.5516069851487517 - -0.5516069851487517 - 0.5516069851487517 - */ - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 0 ), 0.4483, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 0 ), 1.5516, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 1 ), -0.5516, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 1 ), 0.5516, 0.0001 ) ); - - // Layer 4 - TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 0 ), 0 ); - TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 0 ), 2 ); - TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 1 ), -1 ); - TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 1 ), 1 ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - void test_sbt_max_not_fixed() + void test_backwards_sign() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkSBTMax( nlr, tableau ); + populateNetworkBackwardSign( nlr, tableau ); - tableau.setLowerBound( 0, -1 ); + tableau.setLowerBound( 0, 0 ); tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); - // Invoke SBT + + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + TS_ASSERT_THROWS_NOTHING( 
nlr.deepPolyPropagation() ); + + List expectedBounds( + { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - /* - Input ranges: + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - x0: [-1, 1] - x1: [-1, 2] + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Layer 1: + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - x2.lb = x0 + x1 : [-2, 3] - x2.ub = x0 + x1 : [-2, 3] + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); - x3.lb = x0 - x1 : [-3, 2] - x3.ub = x0 - x1 : [-3, 2] + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, 
large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); - Both ReLUs are undecided, bounds are concretized. - Coefficient (first ReLU): 3/( 3--2 ) = 3/5 = 0.6 - Coefficient (second ReLU): 2/( 2--3 ) = 2/5 = 0.4 - x4.lb = 0.6 ( x0 + x1 ) = 0.6x0 + 0.6x1 - x4.ub = 0.6 ( x0 + x1 ) + 1.2 = 0.6x0 + 0.6x1 + 1.2 - x4 range: [0, 3] - - x5.lb = 0.4 ( x0 - x1 ) = 0.4x0 + 0.4x1 - x5.ub = 0.4 ( x0 - x1 ) + 1.2 = 0.4x0 + 0.4x1 + 1.2 - x5 range: [0, 2] - - Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub - Max inherits lower bound from x4, and its upper bound is constant 3. - - x6.lb = 0.6x0 + 0.6x1 : [-1.2, 1.8] - x6.ub = 3 : [3, 3] - x6 range: [-1.2, 3] + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Layer 3: + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - x7.lb = 2 ( 0.6x0 + 0.6x1 ) = 1.2x0 + 1.8x1 : [-2.4, 3.6] - x7.ub = 2 ( 3 ) = 6 : [6, 6] - x7 range: [-2.4, 6] - */ + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), - List expectedBounds( { - Tightening( 2, -2, Tightening::LB ), - Tightening( 2, 3, Tightening::UB ), - Tightening( 3, -3, Tightening::LB ), - Tightening( 3, 2, Tightening::UB ), - 
Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), - Tightening( 5, 2, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), - Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -2.4, Tightening::LB ), - Tightening( 7, 6, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_sign2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); - } ); + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, 
Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - void test_sbt_max_fixed() + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + 
tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB 
), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_leaky_relu() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); 
NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkSBTMax( nlr, tableau ); + populateNetworkBackwardLeakyReLU( nlr, tableau ); - tableau.setLowerBound( 0, 1 ); - tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -3 ); - tableau.setUpperBound( 1, -2 ); + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); - // Invoke SBT + + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( + { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - /* - Input ranges: + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - x0: [1, 2] - x1: [-3, -2] + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Layer 1: + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - x2.lb = x0 + x1 : [-2, 0] - x2.ub = x0 + x1 : [-2, 0] + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { Tightening( 4, -0.1, Tightening::LB 
), + + Tightening( 8, -0.045, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); - x3.lb = x0 - x1 : [3, 5] - x3.ub = x0 - x1 : [3, 5] + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); - First ReLU is negative, bounds become constant 0 - Second ReLU is positive, bounds survive the activation - x4: all set to 0 - - x5.lb = x0 - x1 : [3, 5] - x5.ub = x0 - x1 : [3, 5] - - Max is fixed because x5.lb > x4.ub, it inherits x5's bounds - - x6.lb = x0 - x1 : [3, 5] - x6.ub = x0 - x1 : [3, 5] + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Layer 3: + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB 
), - x7.lb = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] - x7.ub = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] - */ + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), - List expectedBounds( { - Tightening( 2, -2, Tightening::LB ), - Tightening( 2, 0, Tightening::UB ), - Tightening( 3, 3, Tightening::LB ), - Tightening( 3, 5, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 0, Tightening::UB ), - Tightening( 5, 3, Tightening::LB ), - Tightening( 5, 5, Tightening::UB ), - Tightening( 6, 3, Tightening::LB ), - Tightening( 6, 5, Tightening::UB ), - Tightening( 7, 6, Tightening::LB ), - Tightening( 7, 10, Tightening::UB ), + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), - } ); + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { Tightening( 4, -0.5, Tightening::LB ), + Tightening( 5, -0.4, Tightening::LB ), - List bounds; + Tightening( 8, -0.4571, Tightening::LB ), + Tightening( 9, -1.1057, Tightening::LB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - - void test_sbt_softmax1() + + void test_backwards_leaky_relu2() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, 
"sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkSBTSoftmax( nlr, tableau ); + populateNetworkBackwardLeakyRelu2( nlr, tableau ); tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); @@ -7314,412 +11477,704 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 2, -1 ); tableau.setUpperBound( 2, 1 ); - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - } - void test_sbt_softmax2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - { - Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTSoftmax( nlr, tableau ); - - tableau.setLowerBound( 0, 1 ); - tableau.setUpperBound( 0, 1.000001 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 1.000001 ); - tableau.setLowerBound( 2, 1 ); - tableau.setUpperBound( 2, 1.000001 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [1, 1.0001] - x1: [1, 1.0001] - x2: [1, 1.0001] - */ - List expectedBounds( { Tightening( 3, 2, Tightening::LB ), - Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), - Tightening( 5, 0, Tightening::UB ), - Tightening( 6, 0.2595, Tightening::LB ), - Tightening( 6, 0.2595, Tightening::UB ), - Tightening( 7, 0.7054, Tightening::LB ), - Tightening( 7, 0.7054, Tightening::UB ), - Tightening( 8, 0.0351, Tightening::LB ), - 
Tightening( 8, 0.0351, Tightening::UB ), - Tightening( 9, 1, Tightening::LB ), - Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), - Tightening( 10, -1, Tightening::UB ) - + List expectedBounds( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), + Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), + Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), + Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), + Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), + + Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), + Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), + + Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), + Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), + + Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - { - Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "er" ); - 
NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTSoftmax( nlr, tableau ); - - tableau.setLowerBound( 0, 1 ); - tableau.setUpperBound( 0, 1.000001 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 1.000001 ); - tableau.setLowerBound( 2, 1 ); - tableau.setUpperBound( 2, 1.000001 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [1, 1.0001] - x1: [1, 1.0001] - x2: [1, 1.0001] - */ - List expectedBounds( { Tightening( 3, 2, Tightening::LB ), - Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), - Tightening( 5, 0, Tightening::UB ), - Tightening( 6, 0.2595, Tightening::LB ), - Tightening( 6, 0.2595, Tightening::UB ), - Tightening( 7, 0.7054, Tightening::LB ), - Tightening( 7, 0.7054, Tightening::UB ), - Tightening( 8, 0.0351, Tightening::LB ), - Tightening( 8, 0.0351, Tightening::UB ), - Tightening( 9, 1, Tightening::LB ), - Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), - Tightening( 10, -1, Tightening::UB ) - + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { Tightening( 7, -0.2, Tightening::LB ), + Tightening( 8, -0.3, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + Tightening( 10, -0.6, Tightening::LB ), + + Tightening( 14, -0.9, Tightening::LB ), + Tightening( 15, -0.89, Tightening::LB ), + Tightening( 16, -0.77, Tightening::LB ), + + Tightening( 19, -2.3133, Tightening::LB ), + Tightening( 20, -1.2, 
Tightening::LB ), } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, 
large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), + Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), + Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), + Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), + Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), + + Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), + Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), + + Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), + Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), + + Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + 
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { Tightening( 7, -0.2, Tightening::LB ), + Tightening( 8, -0.5, Tightening::LB ), + Tightening( 9, -0.6, Tightening::LB ), + Tightening( 10, -1.5, Tightening::LB ), + + Tightening( 14, -1.1, Tightening::LB ), + Tightening( 15, -2.1771, Tightening::LB ), + Tightening( 16, -1.15, Tightening::LB ), + + Tightening( 19, -5.6259, Tightening::LB ), + Tightening( 20, -1.9, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - - void test_sbt_softmax3() + + void test_backwards_softmax_and_max() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkSBTSoftmax2( nlr, tableau ); + populateNetworkBackwardSoftmaxAndMax( nlr, tableau ); - tableau.setLowerBound( 0, 1 ); - tableau.setUpperBound( 0, 1.00001 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 1.00001 ); - tableau.setLowerBound( 2, 1 ); - tableau.setUpperBound( 2, 1.00001 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + - x0: [1, 1.0001] - x1: [1, 1.0001] - x2: [1, 1.0001] - */ + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( + { Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 6, 0, 
Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - List expectedBounds({ Tightening( 3, 2, Tightening::LB ), - Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), - Tightening( 5, 0, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), - Tightening( 6, -1, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), - Tightening( 7, -2, Tightening::UB ), - Tightening( 8, 0.8668, Tightening::LB ), - Tightening( 8, 0.8668, Tightening::UB ), - Tightening( 9, 0.9820, Tightening::LB ), - Tightening( 9, 0.9820, Tightening::UB ), - Tightening( 10, 0.1173, Tightening::LB ), - Tightening( 10, 0.1173, Tightening::UB ), - Tightening( 11, 0.0179, Tightening::LB ), - Tightening( 11, 0.0179, Tightening::UB ), - Tightening( 12, 0.0159, Tightening::LB ), - Tightening( 12, 0.0159, Tightening::UB ), - Tightening( 13, 0.9470, Tightening::LB ), - Tightening( 13, 0.9470, Tightening::UB ), - Tightening( 14, -0.9470, Tightening::LB ), - Tightening( 14, -0.9470, Tightening::UB ), - Tightening( 15, 1.0253, Tightening::LB ), - Tightening( 15, 1.0253, Tightening::UB ), - Tightening( 16, -1.0253, Tightening::LB ), - Tightening( 16, -1.0253, Tightening::UB ) + Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ), + Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ), + Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ), - } ); + Tightening( 8, -0.1519, Tightening::LB ), Tightening( 8, 1.4775, Tightening::UB ), + Tightening( 9, 0.6614, Tightening::LB ), Tightening( 9, 2.3899, Tightening::UB ), + Tightening( 10, 0.6614, Tightening::LB ), Tightening( 10, 2.3899, Tightening::UB ), + + Tightening( 11, -2.3899, Tightening::LB ), Tightening( 11, -0.6614, Tightening::UB ), + } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - 
} - - void test_softmax_bounds_er() - { - Vector inputLb = { -1, 0, 1 }; - Vector inputUb = { 0, 2, 4 }; - Vector input = { -0.5, 1, 2.5 }; + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); - double value = NLR::DeepPolySoftmaxElement::ERLowerBound( input, inputLb, inputUb, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.0114799, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.00563867, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 1 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.000838421, 0.00001 ) ); + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); - Vector outputLb = { 0.2, 0, 0 }; - Vector outputUb = { 
0.4, 0.1, 0.1 }; + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - value = NLR::DeepPolySoftmaxElement::ERUpperBound( input, outputLb, outputUb, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, -1.44538, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 1.96538, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 1 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.358535, 0.00001 ) ); - } + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), - void test_softmax_bounds_lse1() - { - Vector inputLb = { -1, 0, 1 }; - Vector inputUb = { 0, 2, 3 }; - Vector input = { -0.5, 1, 2 }; - double value = NLR::DeepPolySoftmaxElement::LSELowerBound( input, inputLb, inputUb, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 1 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.00703444, 0.001 ) ); + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 9, 0.0674, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ), - Vector outputLb = { 0.2, 0, 0 }; - Vector outputUb = { 0.4, 0.1, 0.1 }; - value = 
NLR::DeepPolySoftmaxElement::LSEUpperBound( input, outputLb, outputUb, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.164165, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.272204, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 1 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.073207, 0.00001 ) ); + Tightening( 10, 0.0674, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ), + + Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.0674, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - - void test_sbt_bilinear() + + void test_backwards_softmax_and_max2() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkSBTBilinear( nlr, tableau ); + populateNetworkBackwardSoftmaxAndMax2( nlr, tableau ); - tableau.setLowerBound( 0, 1 ); - tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -2 ); + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( 
nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [1, 2] - x1: [-2, 1] - - Layer 1: - - x2.lb = x0 - 2x1 : [-1, 6] - x2.ub = x0 - 2x1 : [-1, 6] - x3.lb = x0 + x1 : [-1, 3] - x3.ub = x0 + x1 : [-1, 3] + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - Coefficients for bilinear layer: - Lower bound: - alpha_l = x3.lb = -1 - beta = x2.lb = -1 - gamma_l = -x2.lb * x3.lb = --1 * -1 = -1 + List expectedBounds( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - Upper bound: - alpha_u = x3.ub = 3 - beta = x2.lb = -1 - gamma_u = -x2.lb * x3.ub = --1 * 3 = 3 - - x4.lb = -1 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + -1 = -2x0 + x1 - 1 : [-7, -2] - x4.ub = 3 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + 3 = 2x0 - 7x1 + 3 : [0, 21] - x4 range: [-7, 21] - - Layer 3: + Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ), + Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ), + Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ), + Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ), + + Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9635, Tightening::UB ), + Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9806, Tightening::UB ), + Tightening( 13, -2.9902, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ), + + Tightening( 14, 0.1086, Tightening::LB ), Tightening( 14, 0.9971, Tightening::UB ), + Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8766, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4595, Tightening::UB ), + + Tightening( 17, 0.1086, Tightening::LB ), 
Tightening( 17, 0.9971, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); - x7.lb = -1 ( 2x0 - 5x1 + 3 ) = -2x0 + 7x1 - 3 : [-21, 0] - x7.ub = -1 ( -2x0 + 3x1 - 1 ) = 2x0 + x1 + 1 : [2, 7] - x4 range: [-21, 5] - */ + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, 
-large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); - List expectedBounds( { Tightening( 2, -1, Tightening::LB ), - Tightening( 2, 6, Tightening::UB ), - Tightening( 3, -1, Tightening::LB ), - Tightening( 3, 3, Tightening::UB ), - Tightening( 4, -7, Tightening::LB ), - Tightening( 4, 21, Tightening::UB ), - Tightening( 5, -21, Tightening::LB ), - Tightening( 5, 7, Tightening::UB ) } ); - List bounds; + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ), + + Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9989, Tightening::UB ), + Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ), + Tightening( 13, -2.9989, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ), + + Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9972, Tightening::UB ), + Tightening( 15, 0.0024, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ), + + Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9972, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, 
expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - - void test_concretize_input_assignment() + + void test_backwards_relu_and_bilinear() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear( nlr, tableau ); - populateNetwork( nlr ); - - // With ReLUs, Inputs are zeros, only biases count - tableau.nextValues[0] = 0; - tableau.nextValues[1] = 0; - - Map assignment; - - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 4 ) ); + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( + { Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - TS_ASSERT( assignment.size() == 14 ); - TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( assignment[13], 4 ) ); + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, 
Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - // With ReLUs, case 1 - tableau.nextValues[0] = 1; - tableau.nextValues[1] = 1; + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ), - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ), + + Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 1 ) ); + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + 
tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); - TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( assignment[13], 1 ) ); - // With ReLUs, case 2 - tableau.nextValues[0] = 1; - tableau.nextValues[1] = 2; + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 0 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 0 ) ); + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), - TS_ASSERT( FloatUtils::areEqual( assignment[12], 0 ) ); - TS_ASSERT( FloatUtils::areEqual( assignment[13], 0 ) ); + Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), + + Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + 
TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { Tightening( 7, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - - - void test_obtain_bound_from_ipq() + + void test_backwards_relu_and_bilinear2() { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); + NLR::NetworkLevelReasoner nlr; - populateNetwork( nlr ); - - Query query; - query.setNumberOfVariables( 14 ); - - - // Initialize the bounds - query.setLowerBound( 0, -1 ); - query.setUpperBound( 0, 1 ); - query.setLowerBound( 1, -1 ); - query.setUpperBound( 1, 1 ); - - double large = 1000; - query.setLowerBound( 2, -large ); - query.setUpperBound( 2, large ); - query.setLowerBound( 3, -large ); - query.setUpperBound( 3, large ); - query.setLowerBound( 4, -large ); - query.setUpperBound( 4, large ); - query.setLowerBound( 5, -large ); - query.setUpperBound( 5, large ); - query.setLowerBound( 6, -large ); - query.setUpperBound( 6, large ); - query.setLowerBound( 7, -large ); - query.setUpperBound( 7, large ); - query.setLowerBound( 8, -large ); - query.setUpperBound( 8, large ); - query.setLowerBound( 9, -large ); - query.setUpperBound( 9, large ); - query.setLowerBound( 10, -large ); - query.setUpperBound( 10, large ); - query.setLowerBound( 11, -large ); - query.setUpperBound( 11, large ); - query.setLowerBound( 12, -large ); - query.setUpperBound( 12, large ); - query.setLowerBound( 13, -large ); - query.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds( query ) ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, 
Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear2( nlr, tableau ); - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ), + Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), + + Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), + } ); + + List bounds; + 
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( + { Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + - 
Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), - } ); + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - List bounds; + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), + Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), + + Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { Tightening( 7, 0, Tightening::LB ), - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : expectedBounds ) - TS_ASSERT( bounds.exists( bound ) ); + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, 
Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } bool boundsEqual( const List &bounds, const List &expectedBounds ) @@ -7740,4 +12195,21 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite } return allFound; } + + void updateTableau( MockTableau &tableau, List &tightenings ) + { + ASSERT( tableau ); + for ( const auto &tightening : tightenings ) + { + if ( tightening._type == Tightening::LB ) + { + tableau.setLowerBound( tightening._variable, tightening._value ); + } + + if ( tightening._type == Tightening::UB ) + { + tableau.setUpperBound( tightening._variable, tightening._value ); + } + } + } }; From 9ee76092ed7ae52453aecd789be868d59e39aeed Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Tue, 1 Oct 2024 23:29:06 +0300 Subject: [PATCH 13/72] 0-6-0: Adding MILP bound types for two backpropagation of bounds algorithms, adjusting in Engine. 0-6-0: Adding MILP bound types for two backpropagation of bounds algorithms, adjusting in Engine. 
--- src/engine/Engine.cpp | 4 ++++ src/engine/MILPSolverBoundTighteningType.h | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/src/engine/Engine.cpp b/src/engine/Engine.cpp index 35d1ae9f8c..d41ac92dd7 100644 --- a/src/engine/Engine.cpp +++ b/src/engine/Engine.cpp @@ -1590,6 +1590,8 @@ void Engine::performMILPSolverBoundedTightening( Query *inputQuery ) case MILPSolverBoundTighteningType::LP_RELAXATION_INCREMENTAL: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX: _networkLevelReasoner->lpRelaxationPropagation(); break; case MILPSolverBoundTighteningType::MILP_ENCODING: @@ -1681,6 +1683,8 @@ void Engine::performMILPSolverBoundedTighteningForSingleLayer( unsigned targetIn return; case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX: case MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION: case MILPSolverBoundTighteningType::NONE: return; diff --git a/src/engine/MILPSolverBoundTighteningType.h b/src/engine/MILPSolverBoundTighteningType.h index f317639375..ad839706f7 100644 --- a/src/engine/MILPSolverBoundTighteningType.h +++ b/src/engine/MILPSolverBoundTighteningType.h @@ -34,6 +34,10 @@ enum class MILPSolverBoundTighteningType { // Perform backward analysis BACKWARD_ANALYSIS_ONCE = 5, BACKWARD_ANALYSIS_CONVERGE = 6, + // Perform backward analysis using the INVPROP Algorithm (arXiv:2302.01404v4 [cs.LG]) + BACKWARD_ANALYSIS_INVPROP = 7, + // Perform backward analysis using the PreimageApproximation Algorithm (arXiv:2305.03686v4 [cs.SE]) + BACKWARD_ANALYSIS_PREIMAGE_APPROX = 8, // Option to have no MILP bound tightening performed NONE = 10, }; From 
2d9b85a36aabb28f0cfce05607e3368e1e86a71e Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Tue, 1 Oct 2024 23:31:54 +0300 Subject: [PATCH 14/72] 0-6-1: Adjusting in Options for two new MILP Bound types. 0-6-1: Adjusting in Options for two new MILP Bound types. --- src/configuration/Options.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/configuration/Options.cpp b/src/configuration/Options.cpp index bf3b9b913a..17d570133f 100644 --- a/src/configuration/Options.cpp +++ b/src/configuration/Options.cpp @@ -207,6 +207,10 @@ MILPSolverBoundTighteningType Options::getMILPSolverBoundTighteningType() const return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE; if ( strategyString == "backward-converge" ) return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE; + if ( strategyString == "backward-invprop" ) + return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP; + if ( strategyString == "backward-preimage-approx" ) + return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX; else if ( strategyString == "milp" ) return MILPSolverBoundTighteningType::MILP_ENCODING; else if ( strategyString == "milp-inc" ) From f5c7d1c25bc3c1d03784d799ac2c9ad7280510bf Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Tue, 1 Oct 2024 23:34:54 +0300 Subject: [PATCH 15/72] 0-6-2: Introducing Polygonal Tightenings (output type of two new algorithms). 0-6-2: Introducing Polygonal Tightenings (output type of two new algorithms). --- src/engine/PolygonalTightening.h | 97 ++++++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) create mode 100644 src/engine/PolygonalTightening.h diff --git a/src/engine/PolygonalTightening.h b/src/engine/PolygonalTightening.h new file mode 100644 index 0000000000..538ed9cee0 --- /dev/null +++ b/src/engine/PolygonalTightening.h @@ -0,0 +1,97 @@ +/********************* */ +/*! 
\file PolygonalTightening.h + ** \verbatim + ** Top contributors (to current version): + ** Duligur Ibeling, Guy Katz, Ido Shmuel + ** This file is part of the Marabou project. + ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS + ** in the top-level source directory) and their institutional affiliations. + ** All rights reserved. See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** [[ Add lengthier description here ]] + +**/ + +#ifndef __PolygonalTightening_h__ +#define __PolygonalTightening_h__ + +#include +#include "NeuronIndex.h" +#include "Map.h" + +class PolygonalTightening +{ +public: + enum PolygonalBoundType { + LB = 0, + UB = 1, + }; + + PolygonalTightening( Map neuronToCoefficient, double value, PolygonalBoundType type ) + : _neuronToCoefficient( neuronToCoefficient ) + , _value( value ) + , _type( type ) + { + } + + /* + The coefficient of each neuron. + */ + Map _neuronToCoefficient; + + /* + Its new value. + */ + double _value; + + /* + Whether the tightening tightens the + lower bound or the upper bound. + */ + PolygonalBoundType _type; + + /* + Equality operator. + */ + bool operator==( const PolygonalTightening &other ) const + { + bool allFound = true; + for ( const auto &pair : _neuronToCoefficient ) + { + bool currentFound = false; + for ( const auto &otherPair : other._neuronToCoefficient ) + { + currentFound |= ( pair.first._layer == otherPair.first._layer + && pair.first._neuron == otherPair.first._neuron + && pair.second == otherPair.second ); + } + allFound &= currentFound; + } + bool result = allFound && _value == other._value && _type == other._type; + return result; + } + + void dump() const + { + printf( "PolygonalTightening: x %s %.2lf\n", _type == LB ? 
">=" : "<=", _value ); + + for ( const auto &pair : _neuronToCoefficient ) + { + printf( "NeuronIndex: (layer %u, neuron %u), Coefficient: %.2lf )\n", + pair.first._layer, + pair.first._neuron, + pair.second ); + } + } +}; + +#endif // __PolygonalTightening_h__ + +// +// Local Variables: +// compile-command: "make -C ../.. " +// tags-file-name: "../../TAGS" +// c-basic-offset: 4 +// End: +//s From b363d05e5a6904bd99c7174eff358d87f4153ad2 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Tue, 1 Oct 2024 23:36:51 +0300 Subject: [PATCH 16/72] 0-6-3: Adjusting in NLR, LayerOwner for Polygonal Tightenings. 0-6-3: Adjusting in NLR, LayerOwner for Polygonal Tightenings. --- src/nlr/LayerOwner.h | 2 ++ src/nlr/NetworkLevelReasoner.cpp | 16 ++++++++++++++++ src/nlr/NetworkLevelReasoner.h | 15 ++++++++++++++- 3 files changed, 32 insertions(+), 1 deletion(-) diff --git a/src/nlr/LayerOwner.h b/src/nlr/LayerOwner.h index 180a4fdebd..122c6a216a 100644 --- a/src/nlr/LayerOwner.h +++ b/src/nlr/LayerOwner.h @@ -18,6 +18,7 @@ #include "ITableau.h" #include "Tightening.h" +#include "PolygonalTightening.h" namespace NLR { @@ -35,6 +36,7 @@ class LayerOwner virtual const ITableau *getTableau() const = 0; virtual unsigned getNumberOfLayers() const = 0; virtual void receiveTighterBound( Tightening tightening ) = 0; + virtual void receivePolygonalTighterBound( PolygonalTightening polygonal_tightening ) = 0; }; } // namespace NLR diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index 88621fcaed..04b9b20683 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -198,6 +198,22 @@ void NetworkLevelReasoner::clearConstraintTightenings() _boundTightenings.clear(); } +void NetworkLevelReasoner::receivePolygonalTighterBound( PolygonalTightening polygonal_tightening ) +{ + _polygonalBoundTightenings.append( polygonal_tightening ); +} + +void NetworkLevelReasoner::getConstraintPolygonalTightenings( List &polygonal_tightenings ) +{ + 
polygonal_tightenings = _polygonalBoundTightenings; + _polygonalBoundTightenings.clear(); +} + +void NetworkLevelReasoner::clearConstraintPolygonalTightenings() +{ + _polygonalBoundTightenings.clear(); +} + void NetworkLevelReasoner::symbolicBoundPropagation() { for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) diff --git a/src/nlr/NetworkLevelReasoner.h b/src/nlr/NetworkLevelReasoner.h index 97af9610f9..fa125f0116 100644 --- a/src/nlr/NetworkLevelReasoner.h +++ b/src/nlr/NetworkLevelReasoner.h @@ -26,6 +26,7 @@ #include "PiecewiseLinearFunctionType.h" #include "Tightening.h" #include "Vector.h" +#include "PolygonalTightening.h" #include @@ -114,6 +115,13 @@ class NetworkLevelReasoner : public LayerOwner - getConstraintTightenings: this is the function that an external user calls in order to collect the tighter bounds discovered by the NLR. + + - receivePolygonalTighterBound: this is a callback from the layer + objects, through which they report tighter polygonal bounds. + + - getConstraintPolygonalTightenings: this is the function that an + external user calls in order to collect the tighter polygonal bounds + discovered by the NLR. 
*/ void setTableau( const ITableau *tableau ); @@ -133,6 +141,10 @@ class NetworkLevelReasoner : public LayerOwner void receiveTighterBound( Tightening tightening ); void getConstraintTightenings( List &tightenings ); void clearConstraintTightenings(); + + void receivePolygonalTighterBound( PolygonalTightening polygonal_tightening ); + void getConstraintPolygonalTightenings( List &polygonal_tightenings ); + void clearConstraintPolygonalTightenings(); /* For debugging purposes: dump the network topology @@ -198,8 +210,9 @@ class NetworkLevelReasoner : public LayerOwner Map _layerIndexToLayer; const ITableau *_tableau; - // Tightenings discovered by the various layers + // Tightenings and Polygonal Tightenings discovered by the various layers List _boundTightenings; + List _polygonalBoundTightenings; std::unique_ptr _deepPolyAnalysis; From 7993b1e08f4ebed031fdda244710c5663e7ea2ba Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Tue, 1 Oct 2024 23:39:14 +0300 Subject: [PATCH 17/72] 0-6-4: Introducing parameterised bounds in LPFormulator (two new algorithms will optimize value of parameter). 0-6-4: Introducing parameterised bounds in LPFormulator (two new algorithms will optimize value of parameter). 
--- src/nlr/LPFormulator.cpp | 986 ++++++++++++++++++++++++++++++++++++++- src/nlr/LPFormulator.h | 47 ++ 2 files changed, 1024 insertions(+), 9 deletions(-) diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp index 9341c7188e..8878cbadd0 100644 --- a/src/nlr/LPFormulator.cpp +++ b/src/nlr/LPFormulator.cpp @@ -553,7 +553,6 @@ void LPFormulator::optimizeBoundsOfNeuronsWithLpRlaxation( ThreadArgument &args, } } - void LPFormulator::tightenSingleVariableBoundsWithLPRelaxation( ThreadArgument &argument ) { try @@ -845,7 +844,6 @@ void LPFormulator::addReluLayerToLpRelaxation( GurobiWrapper &gurobi, } } - void LPFormulator::addRoundLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -910,7 +908,6 @@ void LPFormulator::addRoundLayerToLpRelaxation( GurobiWrapper &gurobi, } } - void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -986,7 +983,6 @@ void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, } } - void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -1081,8 +1077,6 @@ void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, } } - - void LPFormulator::addSignLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -1166,7 +1160,6 @@ void LPFormulator::addSignLayerToLpRelaxation( GurobiWrapper &gurobi, } } - void LPFormulator::addMaxLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -1248,7 +1241,6 @@ void LPFormulator::addMaxLayerToLpRelaxation( GurobiWrapper &gurobi, } } - void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -1508,7 +1500,6 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, const } } - void LPFormulator::addWeightedSumLayerToLpRelaxation( GurobiWrapper &gurobi, 
const Layer *layer, bool createVariables ) @@ -1662,6 +1653,983 @@ void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi, } } +void LPFormulator::addLayerToParameterisedModel( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) +{ + switch ( layer->getLayerType() ) + { + case Layer::INPUT: + addInputLayerToParameterisedLpRelaxation( gurobi, layer, coeff ); + break; + + case Layer::RELU: + addReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + break; + + case Layer::WEIGHTED_SUM: + addWeightedSumLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + break; + + case Layer::ROUND: + addRoundLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + break; + + case Layer::LEAKY_RELU: + addLeakyReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + break; + + case Layer::ABSOLUTE_VALUE: + addAbsoluteValueLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + break; + + case Layer::SIGN: + addSignLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + break; + + case Layer::MAX: + addMaxLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + break; + + case Layer::SIGMOID: + addSigmoidLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + break; + + case Layer::SOFTMAX: + addSoftmaxLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + break; + + case Layer::BILINEAR: + addBilinearLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + break; + + default: + throw NLRError( NLRError::LAYER_TYPE_NOT_SUPPORTED, "LPFormulator" ); + break; + } +} + +void LPFormulator::addInputLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, double coeff ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + unsigned variable = layer->neuronToVariable( i ); + gurobi.addVariable( 
Stringf( "x%u", variable ), layer->getLb( i ), layer->getUb( i ) ); + } +} + +void LPFormulator::addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = sourceValue > 0 ? sourceValue : 0; + + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + gurobi.addVariable( Stringf( "x%u", targetVariable ), 0, layer->getUb( i ) ); + + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // The ReLU is active, y = x + if ( sourceLb < 0 ) + sourceLb = 0; + + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addEqConstraint( terms, 0 ); + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + // The ReLU is inactive, y = 0 + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addEqConstraint( terms, 0 ); + } + else + { + /* + The phase of this ReLU is not yet 
fixed. + + For y = ReLU(x), we add the following triangular relaxation: + + 1. y >= 0 + 2. y >= coeff * x + 3. y is below the line the crosses (x.lb,0) and (x.ub,x.ub) + */ + + // y >= 0 + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + // y >= coeff * x, i.e. y - coeff * x >= 0 + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -coeff, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + /* + u ul + y <= ----- x - ----- + u - l u - l + */ + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -sourceUb / ( sourceUb - sourceLb ), + Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, + ( -sourceUb * sourceLb ) / ( sourceUb - sourceLb ) ); + } + } + } +} + +void LPFormulator::addRoundLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = FloatUtils::round(sourceValue); + + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( 
sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + double ub = std::max( FloatUtils::round( sourceUb ), layer->getUb( i ) ); + double lb = std::min( FloatUtils::round( sourceLb ), layer->getLb( i ) ); + + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); + + // If u = l: y = round(u) + if ( FloatUtils::areEqual( sourceUb, sourceLb ) ) + { + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addEqConstraint( terms, ub ); + } + + else + { + List terms; + // y <= x + 0.5, i.e. y - x <= 0.5 + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, 0.5 ); + + // y >= x - 0.5, i.e. y - x >= -0.5 + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, -0.5 ); + } + } + } +} + +void LPFormulator::addAbsoluteValueLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = sourceValue > 0 ? 
sourceValue : -sourceValue; + + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + double ub = std::min( FloatUtils::max( -sourceLb, sourceUb ), layer->getUb( i ) ); + + gurobi.addVariable(Stringf( "x%u", targetVariable ), 0, ub ); + + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // The AbsoluteValue is active, y = x + if ( sourceLb < 0 ) + sourceLb = 0; + + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addEqConstraint( terms, 0 ); + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + // The AbsoluteValue is inactive, y = -x, i.e. y + x = 0 + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addEqConstraint( terms, 0 ); + } + else + { + /* + The phase of this AbsoluteValue is not yet fixed, 0 <= y <= max(-lb, ub). 
+ */ + + // y >= 0 + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + // y <= max(-lb, ub) + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addLeqConstraint( terms, ub ); + } + } + } +} + +void LPFormulator::addSigmoidLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = SigmoidConstraint::sigmoid(sourceValue); + + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + double ub = std::min( SigmoidConstraint::sigmoid( sourceUb ), layer->getUb( i ) ); + double lb = std::max( SigmoidConstraint::sigmoid( sourceLb ), layer->getLb( i ) ); + + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); + + // If u = l: y = sigmoid(u) + if ( FloatUtils::areEqual( sourceUb, sourceLb ) ) + { + List terms; + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) 
) ); + gurobi.addEqConstraint( terms, ub ); + } + + else + { + List terms; + double lambda = ( ub - lb ) / ( sourceUb - sourceLb ); + double lambdaPrime = std::min( SigmoidConstraint::sigmoidDerivative( sourceLb ), + SigmoidConstraint::sigmoidDerivative( sourceUb ) ); + + // update lower bound + if ( FloatUtils::isPositive( sourceLb ) ) + { + // y >= lambda * (x - l), i.e. y - lambda * x >= lambda * l + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -lambda, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, sourceLb * lambda ); + } + + else + { + // y >= lambda' * (x - l), i.e. y - lambda' * x >= lambda' * l + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -lambdaPrime, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, sourceLb * lambdaPrime ); + } + + // update upper bound + if ( !FloatUtils::isPositive( sourceUb ) ) + { + // y <= lambda * (x - u), i.e. y - lambda * x <= lambda * u + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -lambda, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, sourceUb * lambda ); + } + else + { + // y <= lambda' * (x - u), i.e. 
y - lambda' * x <= lambda' * u + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -lambdaPrime, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, sourceUb * lambdaPrime ); + } + } + } + } +} + +void LPFormulator::addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( layer->neuronEliminated( i ) ) + continue; + + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = FloatUtils::isNegative( sourceValue ) ? -1 : 1; + + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // The Sign is positive, y = 1 + gurobi.addVariable( Stringf( "x%u", targetVariable ), 1, 1 ); + } + else if ( FloatUtils::isNegative( sourceUb ) ) + { + // The Sign is negative, y = -1 + gurobi.addVariable( Stringf( "x%u", targetVariable ), -1, -1 ); + } + else + { + /* + The phase of this Sign is not yet fixed. + + For y = Sign(x), we add the following parallelogram relaxation: + + 1. 
y >= -1 + 2. y <= -1 + 3. y is below the line the crosses (x.lb,-1) and (0,1) + 4. y is above the line the crosses (0,-1) and (x.ub,1) + */ + + // -1 <= y <= 1 + gurobi.addVariable( Stringf( "x%u", targetVariable ), -1, 1 ); + + /* + 2 + y <= ----- * (1 - coeff) x + 1 + - l + */ + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( 2.0 / sourceLb * (1 - coeff), Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, 1 ); + + /* + 2 + y >= ----- * (1 - coeff) x - 1 + u + */ + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( + GurobiWrapper::Term( -2.0 / sourceUb * (1 - coeff), Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, -1 ); + } + } +} + +void LPFormulator::addMaxLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( layer->neuronEliminated( i ) ) + continue; + + unsigned targetVariable = layer->neuronToVariable( i ); + gurobi.addVariable( + Stringf( "x%u", targetVariable ), layer->getLb( i ), layer->getUb( i ) ); + + List sources = layer->getActivationSources( i ); + + bool haveFixedSourceValue = false; + double maxFixedSourceValue = FloatUtils::negativeInfinity(); + + double maxConcreteUb = FloatUtils::negativeInfinity(); + + List terms; + + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + unsigned sourceNeuron = source._neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + haveFixedSourceValue = true; + double value = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + if ( value > maxFixedSourceValue ) + maxFixedSourceValue = value; + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( 
sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + + // Target is at least source: target - source >= 0 + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + // Find maximal concrete upper bound + if ( sourceUb > maxConcreteUb ) + maxConcreteUb = sourceUb; + } + + if ( haveFixedSourceValue && ( maxConcreteUb < maxFixedSourceValue ) ) + { + // At least one of the sources has a fixed value, + // and this fixed value dominates other sources. + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addEqConstraint( terms, maxFixedSourceValue ); + } + else + { + // If we have a fixed value, it's a lower bound + if ( haveFixedSourceValue ) + { + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addGeqConstraint( terms, maxFixedSourceValue ); + } + + // Target must be smaller than greatest concrete upper bound + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addLeqConstraint( terms, maxConcreteUb ); + } + } +} + +void LPFormulator::addSoftmaxLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( layer->neuronEliminated( i ) ) + continue; + + Set handledInputNeurons; + List sources = layer->getActivationSources( i ); + + Vector sourceLbs; + Vector sourceUbs; + Vector sourceMids; + Vector targetLbs; + Vector targetUbs; + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + 
unsigned sourceNeuron = source._neuron; + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceMids.append( ( sourceLb + sourceUb ) / 2 ); + targetLbs.append( layer->getLb( i ) ); + targetUbs.append( layer->getUb( i ) ); + } + + // Find the index of i in the softmax + unsigned index = 0; + for ( const auto &source : sources ) + { + if ( handledInputNeurons.exists( source._neuron ) ) + ++index; + else + { + handledInputNeurons.insert( source._neuron ); + break; + } + } + + double ub = std::min( DeepPolySoftmaxElement::linearUpperBound( sourceLbs, sourceUbs, index ), layer->getUb( i ) ); + double lb = std::max( DeepPolySoftmaxElement::linearLowerBound( sourceLbs, sourceUbs, index ), layer->getLb( i ) ); + + unsigned targetVariable = layer->neuronToVariable( i ); + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); + + double bias; + SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType(); + + + List terms; + if ( FloatUtils::areEqual( lb, ub ) ) + { + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + gurobi.addEqConstraint( terms, ub ); + } + else + { + // Compute symbolic bound + if ( boundType == SoftmaxBoundType::LOG_SUM_EXP_DECOMPOSITION ) + { + bool useLSE2 = false; + for ( const auto &lb : targetLbs ) + { + if ( lb > GlobalConfiguration::SOFTMAX_LSE2_THRESHOLD ) + useLSE2 = true; + } + unsigned inputIndex = 0; + if ( !useLSE2 ) + { + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", 
targetVariable ) ) ); + bias = DeepPolySoftmaxElement::LSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + unsigned sourceNeuron = source._neuron; + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double dldj = + DeepPolySoftmaxElement::dLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); + bias -= dldj * sourceMids[inputIndex]; + ++inputIndex; + } + gurobi.addGeqConstraint( terms, bias ); + } + else + { + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + bias = DeepPolySoftmaxElement::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + unsigned sourceNeuron = source._neuron; + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double dldj = + DeepPolySoftmaxElement::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); + bias -= dldj * sourceMids[inputIndex]; + ++inputIndex; + } + gurobi.addGeqConstraint( terms, bias ); + } + + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + bias = DeepPolySoftmaxElement::LSEUpperBound( sourceMids, targetLbs, targetUbs, index ); + inputIndex = 0; + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + unsigned sourceNeuron = source._neuron; + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double dudj = + DeepPolySoftmaxElement::dLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) 
) ); + bias -= dudj * sourceMids[inputIndex]; + ++inputIndex; + } + gurobi.addLeqConstraint( terms, bias ); + } + else if ( boundType == SoftmaxBoundType::EXPONENTIAL_RECIPROCAL_DECOMPOSITION ) + { + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + bias = DeepPolySoftmaxElement::ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); + unsigned inputIndex = 0; + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + unsigned sourceNeuron = source._neuron; + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double dldj = + DeepPolySoftmaxElement::dERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); + bias -= dldj * sourceMids[inputIndex]; + ++inputIndex; + } + gurobi.addGeqConstraint( terms, bias ); + + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + bias = DeepPolySoftmaxElement::ERUpperBound( sourceMids, targetLbs, targetUbs, index ); + inputIndex = 0; + for ( const auto &source : sources ) + { + const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); + unsigned sourceNeuron = source._neuron; + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double dudj = + DeepPolySoftmaxElement::dERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) ); + bias -= dudj * sourceMids[inputIndex]; + ++inputIndex; + } + gurobi.addLeqConstraint( terms, bias ); + } + } + } +} + +void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + 
List sources = layer->getActivationSources( i ); + + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + + Vector sourceLbs; + Vector sourceUbs; + Vector sourceValues; + Vector sourceNeurons; + unsigned counter = 0; + bool allConstant = true; + for ( const auto &sourceIndex : sources ) + { + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) ); + + sourceNeurons.append( sourceNeuron ); + sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); + + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + ++counter; + + if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) + { + allConstant = false; + } + else + { + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + sourceValues.append( sourceValue ); + } + ++counter; + } + + if ( allConstant ) + { + // If the both source neurons have been eliminated, this neuron is constant + double targetValue = sourceValues[0] * sourceValues[1]; + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + continue; + } + + double lb = FloatUtils::infinity(); + double ub = FloatUtils::negativeInfinity(); + List values = { sourceLbs[0] * sourceLbs[1], + sourceLbs[0] * sourceUbs[1], + sourceUbs[0] * sourceLbs[1], + sourceUbs[0] * sourceUbs[1] }; + for ( const auto &v : values ) + { + if ( v < lb ) + lb = v; + if ( v > ub ) + ub = v; + } + + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); + + // Lower bound: out >= l_y * x + l_x * y - l_x * l_y + List terms; + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( 
GurobiWrapper::Term( -sourceLbs[1], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + terms.append( GurobiWrapper::Term( -sourceLbs[0], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + gurobi.addGeqConstraint( terms, -sourceLbs[0] * sourceLbs[1] ); + + // Upper bound: out <= u_y * x + l_x * y - l_x * u_y + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -sourceUbs[1], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + terms.append( GurobiWrapper::Term( -sourceLbs[0], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + gurobi.addLeqConstraint( terms, -sourceLbs[0] * sourceUbs[1] ); + } + } +} + +void LPFormulator::addWeightedSumLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) +{ + if ( createVariables ) + { + for ( const auto &sourceLayerPair : layer->getSourceLayers() ) + { + const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerPair.first ); + unsigned sourceLayerSize = sourceLayerPair.second; + + for ( unsigned j = 0; j < sourceLayerSize; ++j ) + { + if ( !sourceLayer->neuronEliminated( j ) ) + { + Stringf sourceVariableName( "x%u", sourceLayer->neuronToVariable( j ) ); + if ( !gurobi.containsVariable( sourceVariableName ) ) + { + gurobi.addVariable( + sourceVariableName, sourceLayer->getLb( j ), sourceLayer->getUb( j ) ); + } + } + } + } + } + + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned variable = layer->neuronToVariable( i ); + + gurobi.addVariable( Stringf( "x%u", variable ), layer->getLb( i ), layer->getUb( i ) ); + + List terms; + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", variable ) ) ); + + double bias = -layer->getBias( i ); + + for ( const auto &sourceLayerPair : layer->getSourceLayers() ) + { + const Layer *sourceLayer = 
_layerOwner->getLayer( sourceLayerPair.first ); + unsigned sourceLayerSize = sourceLayerPair.second; + + for ( unsigned j = 0; j < sourceLayerSize; ++j ) + { + double weight = layer->getWeight( sourceLayerPair.first, j, i ); + if ( !sourceLayer->neuronEliminated( j ) ) + { + Stringf sourceVariableName( "x%u", sourceLayer->neuronToVariable( j ) ); + terms.append( GurobiWrapper::Term( weight, sourceVariableName ) ); + } + else + { + bias -= weight * sourceLayer->getEliminatedNeuronValue( j ); + } + } + } + + gurobi.addEqConstraint( terms, bias ); + } + } +} + +void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) +{ + double slope = layer->getAlpha(); + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) + { + // If the source neuron has been eliminated, this neuron is constant + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = sourceValue > 0 ? 
sourceValue : 0; + + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + + continue; + } + + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + gurobi.addVariable( + Stringf( "x%u", targetVariable ), layer->getLb( i ), layer->getUb( i ) ); + + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // The LeakyReLU is active, y = x + + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addEqConstraint( terms, 0 ); + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + // The LeakyReLU is inactive, y = ((alpha+1) * coeff - alpha) * x + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( slope - ( slope + 1 ) * coeff, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addEqConstraint( terms, 0 ); + } + else + { + double width = sourceUb - sourceLb; + double coeff = ( sourceUb - slope * sourceLb ) / width; + double bias = ( ( slope - 1 ) * sourceUb * sourceLb ) / width; + + /* + The phase of this LeakyReLU is not yet fixed. + For y = LeakyReLU(x), we add the following triangular relaxation: + 1. y >= ((alpha+1) * coeff - alpha) * x + 2. y >= x + 3. y is below the line the crosses (x.lb,0) and (x.ub,x.ub) + */ + + // y >= ((alpha+1) * coeff - alpha) * x + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( slope - ( slope + 1 ) * coeff, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + // y >= x, i.e. 
y - x >= 0 + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -coeff, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, bias ); + } + } + } +} + void LPFormulator::setCutoff( double cutoff ) { _cutoffInUse = true; diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index 135854bf6b..07143618d8 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -20,6 +20,7 @@ #include "LayerOwner.h" #include "Map.h" #include "ParallelSolver.h" +#include "PolygonalTightening.h" #include #include @@ -53,6 +54,8 @@ class LPFormulator : public ParallelSolver */ void optimizeBoundsWithLpRelaxation( const Map &layers, bool backward = false ); + void optimizeBoundsWithInvprop( const Map &layers ); + void optimizeBoundsWithPreimageApproximation( const Map &layers ); void optimizeBoundsOfOneLayerWithLpRelaxation( const Map &layers, unsigned targetIndex ); void optimizeBoundsWithIncrementalLpRelaxation( const Map &layers ); @@ -124,6 +127,50 @@ class LPFormulator : public ParallelSolver bool createVariables ); void optimizeBoundsOfNeuronsWithLpRlaxation( ThreadArgument &args, bool backward ); + + + // Create LP relaxations depending on an external parameter. 
+ void addLayerToParameterisedModel( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ); + + void + addInputLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, double coeff ); + + void + addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); + + void + addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); + + void + addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); + + void + addMaxLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); + + void + addRoundLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); + + void + addAbsoluteValueLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); + + void + addSigmoidLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); + + void + addSoftmaxLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); + + void + addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); + + void + addWeightedSumLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ); + /* Optimize for the min/max value of variableName with respect to the constraints From c888160d133d59899a56cc22fc7a79b857151dca Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 10 Oct 2024 20:53:12 +0300 Subject: [PATCH 18/72] 0-6-5: Current state of PreimageApproximation + cleanup + corrections. 0-6-5: Current state of PreimageApproximation + cleanup + corrections. 
--- src/nlr/LPFormulator.cpp | 966 ++++++------------------------- src/nlr/LPFormulator.h | 39 +- src/nlr/Layer.cpp | 958 +++++++++++++++++++++++++----- src/nlr/Layer.h | 11 +- src/nlr/NetworkLevelReasoner.cpp | 12 + src/nlr/NetworkLevelReasoner.h | 9 +- 6 files changed, 1005 insertions(+), 990 deletions(-) diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp index 8878cbadd0..6876586a57 100644 --- a/src/nlr/LPFormulator.cpp +++ b/src/nlr/LPFormulator.cpp @@ -15,13 +15,13 @@ #include "LPFormulator.h" +#include "DeepPolySoftmaxElement.h" #include "GurobiWrapper.h" #include "InfeasibleQueryException.h" #include "Layer.h" #include "MStringf.h" #include "NLRError.h" #include "Options.h" -#include "DeepPolySoftmaxElement.h" #include "TimeUtils.h" #include "Vector.h" @@ -249,7 +249,8 @@ void LPFormulator::optimizeBoundsWithIncrementalLpRelaxation( const Map &layers, - bool backward ) + bool backward, + std::vector coeffs ) { unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS ); @@ -302,7 +303,7 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( const Map &solverToIndex ); // optimize every neuron of layer - optimizeBoundsOfNeuronsWithLpRlaxation( argument, backward ); + optimizeBoundsOfNeuronsWithLpRelaxation( argument, backward, coeffs ); LPFormulator_LOG( Stringf( "Tightening bound for layer %u - done", layerIndex ).ascii() ); } @@ -336,6 +337,40 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( const Map throw InfeasibleQueryException(); } +void LPFormulator::optimizeBoundsWithInvprop( const Map &layers ) +{ + Map coeffs; + for ( const auto &pair : layers ) + { + //coeffs.insert( pair.first, GlobalConfiguration::INVPROP_INITIAL_ALPHA ); + coeffs.insert( pair.first, 0.5 ); + } +} + +void LPFormulator::optimizeBoundsWithPreimageApproximation( const Map &layers ) +{ + // Define EstimateVolume bind which captures layers and only receives coeffs as its input. 
+ auto EstimateVolumeBind = std::bind( EstimateVolume, layers, std::placeholders::_1 ); + + // Search over coeffs in [0, 1]^depth. Enforce single-threaded optimization. + auto opt_params = boost::math::optimization::differential_evolution_parameters>(); + unsigned depth = layers.size(); + opt_params.lower_bounds.resize( depth, 0 ); + opt_params.upper_bounds.resize( depth, 1 ); + opt_params.threads = 1; + double value_to_reach = 0; + std::mt19937_64 rng( GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED ); + + // Find optimal coeffs and run parameterised backward LP relaxation. + auto optimal_coeffs = boost::math::optimization::differential_evolution( EstimateVolumeBind, opt_params, rng, value_to_reach ); + optimizeBoundsWithLpRelaxation( layers, true, optimal_coeffs ); +} + +double LPFormulator::EstimateVolume( const Map &layers, std::vector coeffs ) +{ + return 0; +} + void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map &layers, unsigned targetIndex ) { @@ -384,7 +419,7 @@ void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map coeffs ) { unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS ); @@ -522,9 +557,9 @@ void LPFormulator::optimizeBoundsOfNeuronsWithLpRlaxation( ThreadArgument &args, mtx.lock(); if ( backward ) - createLPRelaxationAfter( layers, *freeSolver, lastIndexOfRelaxation ); + createLPRelaxationAfter( layers, *freeSolver, lastIndexOfRelaxation, coeffs ); else - createLPRelaxation( layers, *freeSolver, lastIndexOfRelaxation ); + createLPRelaxation( layers, *freeSolver, lastIndexOfRelaxation, coeffs ); mtx.unlock(); // spawn a thread to tighten the bounds for the current variable @@ -645,20 +680,29 @@ void LPFormulator::tightenSingleVariableBoundsWithLPRelaxation( ThreadArgument & void LPFormulator::createLPRelaxation( const Map &layers, GurobiWrapper &gurobi, - unsigned lastLayer ) + unsigned lastLayer, + std::vector coeffs ) { for ( const auto &layer : layers ) { - if ( 
layer.second->getLayerIndex() > lastLayer ) + unsigned currentLayerIndex = layer.second->getLayerIndex(); + if ( currentLayerIndex > lastLayer ) continue; - addLayerToModel( gurobi, layer.second, false ); + if ( coeffs.empty() ) + addLayerToModel( gurobi, layer.second, false ); + else + { + double coeff = coeffs[currentLayerIndex]; + addLayerToParameterisedModel( gurobi, layer.second, false, coeff ); + } } } void LPFormulator::createLPRelaxationAfter( const Map &layers, GurobiWrapper &gurobi, - unsigned firstLayer ) + unsigned firstLayer, + std::vector coeffs ) { unsigned depth = GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH; std::priority_queue, std::greater> layersToAdd; @@ -676,7 +720,14 @@ void LPFormulator::createLPRelaxationAfter( const Map &layers continue; else { - addLayerToModel( gurobi, currentLayer, true ); + if ( coeffs.empty() ) + addLayerToModel( gurobi, currentLayer, true ); + else + { + double coeff = coeffs[currentLayerIndex]; + addLayerToParameterisedModel( gurobi, currentLayer, true, coeff ); + } + for ( const auto &nextLayer : currentLayer->getSuccessorLayers() ) { if ( layerToDepth.exists( nextLayer ) ) @@ -1203,7 +1254,6 @@ void LPFormulator::addMaxLayerToLpRelaxation( GurobiWrapper &gurobi, if ( createVariables && !gurobi.containsVariable( sourceName ) ) gurobi.addVariable( sourceName, sourceLb, sourceUb ); - // Target is at least source: target - source >= 0 terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); @@ -1298,7 +1348,6 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, double bias; SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType(); - List terms; if ( FloatUtils::areEqual( lb, ub ) ) { @@ -1621,7 +1670,7 @@ void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi, else { double width = sourceUb - sourceLb; - double coeff = ( sourceUb - slope * sourceLb ) / width; + double weight = ( sourceUb - slope * sourceLb ) / width; double 
bias = ( ( slope - 1 ) * sourceUb * sourceLb ) / width; /* @@ -1646,7 +1695,7 @@ void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi, terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -coeff, Stringf( "x%u", sourceVariable ) ) ); + terms.append( GurobiWrapper::Term( -weight, Stringf( "x%u", sourceVariable ) ) ); gurobi.addLeqConstraint( terms, bias ); } } @@ -1660,65 +1709,25 @@ void LPFormulator::addLayerToParameterisedModel( GurobiWrapper &gurobi, { switch ( layer->getLayerType() ) { - case Layer::INPUT: - addInputLayerToParameterisedLpRelaxation( gurobi, layer, coeff ); - break; - + case Layer::RELU: addReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); break; - case Layer::WEIGHTED_SUM: - addWeightedSumLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); - break; - - case Layer::ROUND: - addRoundLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); - break; - case Layer::LEAKY_RELU: addLeakyReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); break; - case Layer::ABSOLUTE_VALUE: - addAbsoluteValueLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); - break; - case Layer::SIGN: addSignLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); break; - - case Layer::MAX: - addMaxLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); - break; - - case Layer::SIGMOID: - addSigmoidLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); - break; - case Layer::SOFTMAX: - addSoftmaxLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); - break; - - case Layer::BILINEAR: - addBilinearLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); - break; - default: - throw NLRError( NLRError::LAYER_TYPE_NOT_SUPPORTED, "LPFormulator" ); + addLayerToModel( gurobi, layer, 
createVariables ); break; } } -void LPFormulator::addInputLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, double coeff ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - unsigned variable = layer->neuronToVariable( i ); - gurobi.addVariable( Stringf( "x%u", variable ), layer->getLb( i ), layer->getUb( i ) ); - } -} - void LPFormulator::addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, @@ -1789,7 +1798,7 @@ void LPFormulator::addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurob terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); gurobi.addGeqConstraint( terms, 0 ); - // y >= coeff * x, i.e. y - coeff * x >= 0 + // y >= coeff * x, i.e. y - coeff * x >= 0 (varies continuously between y >= 0 and y >= alpha * x). terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -coeff, Stringf( "x%u", sourceVariable ) ) ); @@ -1811,76 +1820,96 @@ void LPFormulator::addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurob } } -void LPFormulator::addRoundLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - double coeff ) +void LPFormulator::addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) { for ( unsigned i = 0; i < layer->getSize(); ++i ) { - if ( !layer->neuronEliminated( i ) ) + if ( layer->neuronEliminated( i ) ) + continue; + + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + unsigned sourceNeuron = sources.begin()->_neuron; + + if ( sourceLayer->neuronEliminated( sourceNeuron ) ) { - unsigned targetVariable = layer->neuronToVariable( i ); + // If the source neuron has been eliminated, this neuron is constant 
+ double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + double targetValue = FloatUtils::isNegative( sourceValue ) ? -1 : 1; - List sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - unsigned sourceNeuron = sources.begin()->_neuron; + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); - if ( sourceLayer->neuronEliminated( sourceNeuron ) ) - { - // If the source neuron has been eliminated, this neuron is constant - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - double targetValue = FloatUtils::round(sourceValue); + continue; + } - gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); - continue; - } + if ( !FloatUtils::isNegative( sourceLb ) ) + { + // The Sign is positive, y = 1 + gurobi.addVariable( Stringf( "x%u", targetVariable ), 1, 1 ); + } + else if ( FloatUtils::isNegative( sourceUb ) ) + { + // The Sign is negative, y = -1 + gurobi.addVariable( Stringf( "x%u", targetVariable ), -1, -1 ); + } + else + { + /* + The phase of this Sign is not yet fixed. 
- unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceVariable ); - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); + For y = Sign(x), we add the following parallelogram relaxation: - double ub = std::max( FloatUtils::round( sourceUb ), layer->getUb( i ) ); - double lb = std::min( FloatUtils::round( sourceLb ), layer->getLb( i ) ); - - gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - - // If u = l: y = round(u) - if ( FloatUtils::areEqual( sourceUb, sourceLb ) ) - { - List terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addEqConstraint( terms, ub ); - } - - else - { - List terms; - // y <= x + 0.5, i.e. y - x <= 0.5 - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addLeqConstraint( terms, 0.5 ); - - // y >= x - 0.5, i.e. y - x >= -0.5 - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, -0.5 ); - } + 1. y >= -1 + 2. y <= -1 + 3. y is below the line the crosses (x.lb,-1) and (0,1) + 4. y is above the line the crosses (0,-1) and (x.ub,1) + */ + + // -1 <= y <= 1 + gurobi.addVariable( Stringf( "x%u", targetVariable ), -1, 1 ); + + /* + 2 + y <= ----- * (1 - coeff) x + 1 (varies continuously between y <= 1 and y <= -2 / l * x + 1). 
+ - l + */ + List terms; + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( 2.0 / sourceLb * (1 - coeff), Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, 1 ); + + /* + 2 + y >= ----- * (1 - coeff) x - 1 (varies continuously between y >= -1 and y >= 2 / u * x - 1). + u + */ + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( + GurobiWrapper::Term( -2.0 / sourceUb * (1 - coeff), Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, -1 ); } } } -void LPFormulator::addAbsoluteValueLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - double coeff ) +void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) { + double slope = layer->getAlpha(); for ( unsigned i = 0; i < layer->getSize(); ++i ) { if ( !layer->neuronEliminated( i ) ) @@ -1895,7 +1924,7 @@ void LPFormulator::addAbsoluteValueLayerToParameterisedLpRelaxation( GurobiWrapp { // If the source neuron has been eliminated, this neuron is constant double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - double targetValue = sourceValue > 0 ? sourceValue : -sourceValue; + double targetValue = sourceValue > 0 ? 
sourceValue : 0; gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); @@ -1905,19 +1934,17 @@ void LPFormulator::addAbsoluteValueLayerToParameterisedLpRelaxation( GurobiWrapp unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceVariable ); if ( createVariables && !gurobi.containsVariable( sourceName ) ) gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - double ub = std::min( FloatUtils::max( -sourceLb, sourceUb ), layer->getUb( i ) ); - gurobi.addVariable(Stringf( "x%u", targetVariable ), 0, ub ); + gurobi.addVariable( + Stringf( "x%u", targetVariable ), layer->getLb( i ), layer->getUb( i ) ); if ( !FloatUtils::isNegative( sourceLb ) ) { - // The AbsoluteValue is active, y = x - if ( sourceLb < 0 ) - sourceLb = 0; + // The LeakyReLU is active, y = x List terms; terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); @@ -1926,704 +1953,41 @@ void LPFormulator::addAbsoluteValueLayerToParameterisedLpRelaxation( GurobiWrapp } else if ( !FloatUtils::isPositive( sourceUb ) ) { - // The AbsoluteValue is inactive, y = -x, i.e. y + x = 0 + // The LeakyReLU is inactive, y = alpha * x List terms; terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", sourceVariable ) ) ); + terms.append( GurobiWrapper::Term( slope, Stringf( "x%u", sourceVariable ) ) ); gurobi.addEqConstraint( terms, 0 ); } else { + double width = sourceUb - sourceLb; + double weight = ( sourceUb - slope * sourceLb ) / width; + double bias = ( ( slope - 1 ) * sourceUb * sourceLb ) / width; + /* - The phase of this AbsoluteValue is not yet fixed, 0 <= y <= max(-lb, ub). + The phase of this LeakyReLU is not yet fixed. + For y = LeakyReLU(x), we add the following triangular relaxation: + 1. 
y >= ((1 - alpha) * coeff + alpha) * x (varies continuously between y >= alpha * x and y >= x). + 2. y >= x + 3. y is below the line the crosses (x.lb,0) and (x.ub,x.ub) */ - // y >= 0 + // y >= ((alpha+1) * coeff - alpha) * x List terms; terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -slope - ( 1 - slope ) * coeff, Stringf( "x%u", sourceVariable ) ) ); gurobi.addGeqConstraint( terms, 0 ); - // y <= max(-lb, ub) + // y >= x, i.e. y - x >= 0 terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addLeqConstraint( terms, ub ); - } - } - } -} - -void LPFormulator::addSigmoidLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - double coeff ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( !layer->neuronEliminated( i ) ) - { - unsigned targetVariable = layer->neuronToVariable( i ); - - List sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - unsigned sourceNeuron = sources.begin()->_neuron; - - if ( sourceLayer->neuronEliminated( sourceNeuron ) ) - { - // If the source neuron has been eliminated, this neuron is constant - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - double targetValue = SigmoidConstraint::sigmoid(sourceValue); - - gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); - - continue; - } + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceVariable ); - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, 
sourceLb, sourceUb ); - - double ub = std::min( SigmoidConstraint::sigmoid( sourceUb ), layer->getUb( i ) ); - double lb = std::max( SigmoidConstraint::sigmoid( sourceLb ), layer->getLb( i ) ); - - gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - - // If u = l: y = sigmoid(u) - if ( FloatUtils::areEqual( sourceUb, sourceLb ) ) - { - List terms; terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addEqConstraint( terms, ub ); - } - - else - { - List terms; - double lambda = ( ub - lb ) / ( sourceUb - sourceLb ); - double lambdaPrime = std::min( SigmoidConstraint::sigmoidDerivative( sourceLb ), - SigmoidConstraint::sigmoidDerivative( sourceUb ) ); - - // update lower bound - if ( FloatUtils::isPositive( sourceLb ) ) - { - // y >= lambda * (x - l), i.e. y - lambda * x >= lambda * l - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -lambda, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, sourceLb * lambda ); - } - - else - { - // y >= lambda' * (x - l), i.e. y - lambda' * x >= lambda' * l - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -lambdaPrime, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, sourceLb * lambdaPrime ); - } - - // update upper bound - if ( !FloatUtils::isPositive( sourceUb ) ) - { - // y <= lambda * (x - u), i.e. y - lambda * x <= lambda * u - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -lambda, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addLeqConstraint( terms, sourceUb * lambda ); - } - else - { - // y <= lambda' * (x - u), i.e. 
y - lambda' * x <= lambda' * u - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -lambdaPrime, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addLeqConstraint( terms, sourceUb * lambdaPrime ); - } - } - } - } -} - -void LPFormulator::addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - double coeff ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( layer->neuronEliminated( i ) ) - continue; - - unsigned targetVariable = layer->neuronToVariable( i ); - - List sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - unsigned sourceNeuron = sources.begin()->_neuron; - - if ( sourceLayer->neuronEliminated( sourceNeuron ) ) - { - // If the source neuron has been eliminated, this neuron is constant - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - double targetValue = FloatUtils::isNegative( sourceValue ) ? -1 : 1; - - gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); - - continue; - } - - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceVariable ); - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - if ( !FloatUtils::isNegative( sourceLb ) ) - { - // The Sign is positive, y = 1 - gurobi.addVariable( Stringf( "x%u", targetVariable ), 1, 1 ); - } - else if ( FloatUtils::isNegative( sourceUb ) ) - { - // The Sign is negative, y = -1 - gurobi.addVariable( Stringf( "x%u", targetVariable ), -1, -1 ); - } - else - { - /* - The phase of this Sign is not yet fixed. - - For y = Sign(x), we add the following parallelogram relaxation: - - 1. 
y >= -1 - 2. y <= -1 - 3. y is below the line the crosses (x.lb,-1) and (0,1) - 4. y is above the line the crosses (0,-1) and (x.ub,1) - */ - - // -1 <= y <= 1 - gurobi.addVariable( Stringf( "x%u", targetVariable ), -1, 1 ); - - /* - 2 - y <= ----- * (1 - coeff) x + 1 - - l - */ - List terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( 2.0 / sourceLb * (1 - coeff), Stringf( "x%u", sourceVariable ) ) ); - gurobi.addLeqConstraint( terms, 1 ); - - /* - 2 - y >= ----- * (1 - coeff) x - 1 - u - */ - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( - GurobiWrapper::Term( -2.0 / sourceUb * (1 - coeff), Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, -1 ); - } - } -} - -void LPFormulator::addMaxLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - double coeff ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( layer->neuronEliminated( i ) ) - continue; - - unsigned targetVariable = layer->neuronToVariable( i ); - gurobi.addVariable( - Stringf( "x%u", targetVariable ), layer->getLb( i ), layer->getUb( i ) ); - - List sources = layer->getActivationSources( i ); - - bool haveFixedSourceValue = false; - double maxFixedSourceValue = FloatUtils::negativeInfinity(); - - double maxConcreteUb = FloatUtils::negativeInfinity(); - - List terms; - - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - unsigned sourceNeuron = source._neuron; - - if ( sourceLayer->neuronEliminated( sourceNeuron ) ) - { - haveFixedSourceValue = true; - double value = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - if ( value > maxFixedSourceValue ) - maxFixedSourceValue = value; - continue; - } - - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double sourceLb = sourceLayer->getLb( 
sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceVariable ); - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - - // Target is at least source: target - source >= 0 - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, 0 ); - - // Find maximal concrete upper bound - if ( sourceUb > maxConcreteUb ) - maxConcreteUb = sourceUb; - } - - if ( haveFixedSourceValue && ( maxConcreteUb < maxFixedSourceValue ) ) - { - // At least one of the sources has a fixed value, - // and this fixed value dominates other sources. - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addEqConstraint( terms, maxFixedSourceValue ); - } - else - { - // If we have a fixed value, it's a lower bound - if ( haveFixedSourceValue ) - { - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addGeqConstraint( terms, maxFixedSourceValue ); - } - - // Target must be smaller than greatest concrete upper bound - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addLeqConstraint( terms, maxConcreteUb ); - } - } -} - -void LPFormulator::addSoftmaxLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - double coeff ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( layer->neuronEliminated( i ) ) - continue; - - Set handledInputNeurons; - List sources = layer->getActivationSources( i ); - - Vector sourceLbs; - Vector sourceUbs; - Vector sourceMids; - Vector targetLbs; - Vector targetUbs; - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - 
unsigned sourceNeuron = source._neuron; - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceVariable ); - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - sourceMids.append( ( sourceLb + sourceUb ) / 2 ); - targetLbs.append( layer->getLb( i ) ); - targetUbs.append( layer->getUb( i ) ); - } - - // Find the index of i in the softmax - unsigned index = 0; - for ( const auto &source : sources ) - { - if ( handledInputNeurons.exists( source._neuron ) ) - ++index; - else - { - handledInputNeurons.insert( source._neuron ); - break; - } - } - - double ub = std::min( DeepPolySoftmaxElement::linearUpperBound( sourceLbs, sourceUbs, index ), layer->getUb( i ) ); - double lb = std::max( DeepPolySoftmaxElement::linearLowerBound( sourceLbs, sourceUbs, index ), layer->getLb( i ) ); - - unsigned targetVariable = layer->neuronToVariable( i ); - gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - - double bias; - SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType(); - - - List terms; - if ( FloatUtils::areEqual( lb, ub ) ) - { - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - gurobi.addEqConstraint( terms, ub ); - } - else - { - // Compute symbolic bound - if ( boundType == SoftmaxBoundType::LOG_SUM_EXP_DECOMPOSITION ) - { - bool useLSE2 = false; - for ( const auto &lb : targetLbs ) - { - if ( lb > GlobalConfiguration::SOFTMAX_LSE2_THRESHOLD ) - useLSE2 = true; - } - unsigned inputIndex = 0; - if ( !useLSE2 ) - { - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", 
targetVariable ) ) ); - bias = DeepPolySoftmaxElement::LSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - unsigned sourceNeuron = source._neuron; - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = - DeepPolySoftmaxElement::dLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); - bias -= dldj * sourceMids[inputIndex]; - ++inputIndex; - } - gurobi.addGeqConstraint( terms, bias ); - } - else - { - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - unsigned sourceNeuron = source._neuron; - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = - DeepPolySoftmaxElement::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); - bias -= dldj * sourceMids[inputIndex]; - ++inputIndex; - } - gurobi.addGeqConstraint( terms, bias ); - } - - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::LSEUpperBound( sourceMids, targetLbs, targetUbs, index ); - inputIndex = 0; - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - unsigned sourceNeuron = source._neuron; - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dudj = - DeepPolySoftmaxElement::dLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex ); - terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) 
) ); - bias -= dudj * sourceMids[inputIndex]; - ++inputIndex; - } - gurobi.addLeqConstraint( terms, bias ); - } - else if ( boundType == SoftmaxBoundType::EXPONENTIAL_RECIPROCAL_DECOMPOSITION ) - { - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); - unsigned inputIndex = 0; - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - unsigned sourceNeuron = source._neuron; - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = - DeepPolySoftmaxElement::dERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); - bias -= dldj * sourceMids[inputIndex]; - ++inputIndex; - } - gurobi.addGeqConstraint( terms, bias ); - - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::ERUpperBound( sourceMids, targetLbs, targetUbs, index ); - inputIndex = 0; - for ( const auto &source : sources ) - { - const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); - unsigned sourceNeuron = source._neuron; - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dudj = - DeepPolySoftmaxElement::dERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); - terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) ); - bias -= dudj * sourceMids[inputIndex]; - ++inputIndex; - } - gurobi.addLeqConstraint( terms, bias ); - } - } - } -} - -void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - double coeff ) -{ - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( !layer->neuronEliminated( i ) ) - { - unsigned targetVariable = layer->neuronToVariable( i ); - - 
List sources = layer->getActivationSources( i ); - - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - - Vector sourceLbs; - Vector sourceUbs; - Vector sourceValues; - Vector sourceNeurons; - unsigned counter = 0; - bool allConstant = true; - for ( const auto &sourceIndex : sources ) - { - unsigned sourceNeuron = sourceIndex._neuron; - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) ); - - sourceNeurons.append( sourceNeuron ); - sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - ++counter; - - if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) - { - allConstant = false; - } - else - { - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - sourceValues.append( sourceValue ); - } - ++counter; - } - - if ( allConstant ) - { - // If the both source neurons have been eliminated, this neuron is constant - double targetValue = sourceValues[0] * sourceValues[1]; - gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); - continue; - } - - double lb = FloatUtils::infinity(); - double ub = FloatUtils::negativeInfinity(); - List values = { sourceLbs[0] * sourceLbs[1], - sourceLbs[0] * sourceUbs[1], - sourceUbs[0] * sourceLbs[1], - sourceUbs[0] * sourceUbs[1] }; - for ( const auto &v : values ) - { - if ( v < lb ) - lb = v; - if ( v > ub ) - ub = v; - } - - gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - - // Lower bound: out >= l_y * x + l_x * y - l_x * l_y - List terms; - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( 
GurobiWrapper::Term( -sourceLbs[1], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); - terms.append( GurobiWrapper::Term( -sourceLbs[0], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); - gurobi.addGeqConstraint( terms, -sourceLbs[0] * sourceLbs[1] ); - - // Upper bound: out <= u_y * x + l_x * y - l_x * u_y - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -sourceUbs[1], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); - terms.append( GurobiWrapper::Term( -sourceLbs[0], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); - gurobi.addLeqConstraint( terms, -sourceLbs[0] * sourceUbs[1] ); - } - } -} - -void LPFormulator::addWeightedSumLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - double coeff ) -{ - if ( createVariables ) - { - for ( const auto &sourceLayerPair : layer->getSourceLayers() ) - { - const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerPair.first ); - unsigned sourceLayerSize = sourceLayerPair.second; - - for ( unsigned j = 0; j < sourceLayerSize; ++j ) - { - if ( !sourceLayer->neuronEliminated( j ) ) - { - Stringf sourceVariableName( "x%u", sourceLayer->neuronToVariable( j ) ); - if ( !gurobi.containsVariable( sourceVariableName ) ) - { - gurobi.addVariable( - sourceVariableName, sourceLayer->getLb( j ), sourceLayer->getUb( j ) ); - } - } - } - } - } - - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( !layer->neuronEliminated( i ) ) - { - unsigned variable = layer->neuronToVariable( i ); - - gurobi.addVariable( Stringf( "x%u", variable ), layer->getLb( i ), layer->getUb( i ) ); - - List terms; - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", variable ) ) ); - - double bias = -layer->getBias( i ); - - for ( const auto &sourceLayerPair : layer->getSourceLayers() ) - { - const Layer *sourceLayer = 
_layerOwner->getLayer( sourceLayerPair.first ); - unsigned sourceLayerSize = sourceLayerPair.second; - - for ( unsigned j = 0; j < sourceLayerSize; ++j ) - { - double weight = layer->getWeight( sourceLayerPair.first, j, i ); - if ( !sourceLayer->neuronEliminated( j ) ) - { - Stringf sourceVariableName( "x%u", sourceLayer->neuronToVariable( j ) ); - terms.append( GurobiWrapper::Term( weight, sourceVariableName ) ); - } - else - { - bias -= weight * sourceLayer->getEliminatedNeuronValue( j ); - } - } - } - - gurobi.addEqConstraint( terms, bias ); - } - } -} - -void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - double coeff ) -{ - double slope = layer->getAlpha(); - for ( unsigned i = 0; i < layer->getSize(); ++i ) - { - if ( !layer->neuronEliminated( i ) ) - { - unsigned targetVariable = layer->neuronToVariable( i ); - - List sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - unsigned sourceNeuron = sources.begin()->_neuron; - - if ( sourceLayer->neuronEliminated( sourceNeuron ) ) - { - // If the source neuron has been eliminated, this neuron is constant - double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - double targetValue = sourceValue > 0 ? 
sourceValue : 0; - - gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); - - continue; - } - - unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double sourceLb = sourceLayer->getLb( sourceNeuron ); - double sourceUb = sourceLayer->getUb( sourceNeuron ); - - String sourceName = Stringf( "x%u", sourceVariable ); - if ( createVariables && !gurobi.containsVariable( sourceName ) ) - gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - gurobi.addVariable( - Stringf( "x%u", targetVariable ), layer->getLb( i ), layer->getUb( i ) ); - - if ( !FloatUtils::isNegative( sourceLb ) ) - { - // The LeakyReLU is active, y = x - - List terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addEqConstraint( terms, 0 ); - } - else if ( !FloatUtils::isPositive( sourceUb ) ) - { - // The LeakyReLU is inactive, y = ((alpha+1) * coeff - alpha) * x - List terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( slope - ( slope + 1 ) * coeff, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addEqConstraint( terms, 0 ); - } - else - { - double width = sourceUb - sourceLb; - double coeff = ( sourceUb - slope * sourceLb ) / width; - double bias = ( ( slope - 1 ) * sourceUb * sourceLb ) / width; - - /* - The phase of this LeakyReLU is not yet fixed. - For y = LeakyReLU(x), we add the following triangular relaxation: - 1. y >= ((alpha+1) * coeff - alpha) * x - 2. y >= x - 3. y is below the line the crosses (x.lb,0) and (x.ub,x.ub) - */ - - // y >= ((alpha+1) * coeff - alpha) * x - List terms; - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( slope - ( slope + 1 ) * coeff, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, 0 ); - - // y >= x, i.e. 
y - x >= 0 - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, 0 ); - - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -coeff, Stringf( "x%u", sourceVariable ) ) ); + terms.append( GurobiWrapper::Term( -weight, Stringf( "x%u", sourceVariable ) ) ); gurobi.addLeqConstraint( terms, bias ); } } diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index 07143618d8..4184839d9c 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -53,7 +54,8 @@ class LPFormulator : public ParallelSolver constructed from scratch */ void optimizeBoundsWithLpRelaxation( const Map &layers, - bool backward = false ); + bool backward = false, + std::vector coeffs = {} ); void optimizeBoundsWithInvprop( const Map &layers ); void optimizeBoundsWithPreimageApproximation( const Map &layers ); void optimizeBoundsOfOneLayerWithLpRelaxation( const Map &layers, @@ -75,10 +77,12 @@ class LPFormulator : public ParallelSolver */ void createLPRelaxation( const Map &layers, GurobiWrapper &gurobi, - unsigned lastLayer = UINT_MAX ); + unsigned lastLayer = UINT_MAX, + std::vector coeffs = {} ); void createLPRelaxationAfter( const Map &layers, GurobiWrapper &gurobi, - unsigned firstLayer ); + unsigned firstLayer, + std::vector coeffs = {} ); double solveLPRelaxation( GurobiWrapper &gurobi, const Map &layers, MinOrMax minOrMax, @@ -126,7 +130,7 @@ class LPFormulator : public ParallelSolver const Layer *layer, bool createVariables ); - void optimizeBoundsOfNeuronsWithLpRlaxation( ThreadArgument &args, bool backward ); + void optimizeBoundsOfNeuronsWithLpRelaxation( ThreadArgument &args, bool backward, std::vector coeffs = {} ); // Create LP relaxations depending on an external 
parameter. @@ -135,9 +139,6 @@ class LPFormulator : public ParallelSolver bool createVariables, double coeff ); - void - addInputLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, double coeff ); - void addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); @@ -147,29 +148,9 @@ class LPFormulator : public ParallelSolver void addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); - void - addMaxLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); - - void - addRoundLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); - - void - addAbsoluteValueLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); - - void - addSigmoidLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); - - void - addSoftmaxLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); - void - addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); - - void - addWeightedSumLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - double coeff ); + // Estimate Volume of parameterised LP relaxations. + static double EstimateVolume( const Map &layers, std::vector coeffs ); /* diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index c7f728e860..6fdb081cba 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -2,7 +2,7 @@ /*! \file Layer.cpp ** \verbatim ** Top contributors (to current version): - ** Guy Katz, Ido Shmuel + ** Guy Katz ** This file is part of the Marabou project. 
** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS ** in the top-level source directory) and their institutional affiliations. @@ -1061,7 +1061,6 @@ void Layer::computeIntervalArithmeticBoundsForSigmoid() } } - void Layer::computeIntervalArithmeticBoundsForRound() { for ( unsigned i = 0; i < _size; ++i ) @@ -1094,7 +1093,6 @@ void Layer::computeIntervalArithmeticBoundsForRound() } } - void Layer::computeIntervalArithmeticBoundsForMax() { for ( unsigned i = 0; i < _size; ++i ) @@ -1685,8 +1683,6 @@ void Layer::computeSymbolicBoundsForSign() _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); - - std::cout << "sign, p-ll, ul, lu, uu: " << _symbolicLbOfLb[i] << " " << _symbolicUbOfLb[i] << " " << _symbolicLbOfUb[i] << " " << _symbolicUbOfUb[i] << " " << std::endl; // Has the b variable been fixed? if ( !FloatUtils::isNegative( sourceLb ) ) @@ -1728,7 +1724,6 @@ void Layer::computeSymbolicBoundsForSign() for ( unsigned j = 0; j < _inputLayerSize; ++j ) _symbolicUb[j * _size + i] *= factor; - // Do the same for the bias, and then adjust _symbolicUpperBias[i] *= factor; _symbolicUpperBias[i] += 1; @@ -1756,13 +1751,11 @@ void Layer::computeSymbolicBoundsForSign() for ( unsigned j = 0; j < _inputLayerSize; ++j ) { _symbolicLb[j * _size + i] *= factor; - std::cout << "sign, p-lb: " << _symbolicLb[j * _size + i] << std::endl; } // Do the same for the bias, and then adjust _symbolicLowerBias[i] *= factor; _symbolicLowerBias[i] -= 1; - std::cout << "sign, p-lB: " << _symbolicLowerBias[i] << std::endl; } if (upperSignPhase == PHASE_NOT_FIXED) @@ -2051,17 +2044,17 @@ void Layer::computeSymbolicBoundsForLeakyRelu() // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) // Concrete upper bound: x_f <= ub_b double width = sourceUb - sourceLb; - double coeff = ( sourceUb - _alpha * 
sourceLb ) / width; + double weight = ( sourceUb - _alpha * sourceLb ) / width; if ( _alpha <= 1 ) { for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - _symbolicUb[j * _size + i] *= coeff; + _symbolicUb[j * _size + i] *= weight; } // Do the same for the bias, and then adjust - _symbolicUpperBias[i] *= coeff; + _symbolicUpperBias[i] *= weight; _symbolicUpperBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; @@ -2096,11 +2089,11 @@ void Layer::computeSymbolicBoundsForLeakyRelu() { for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - _symbolicLb[j * _size + i] *= coeff; + _symbolicLb[j * _size + i] *= weight; } // Do the same for the bias, and then adjust - _symbolicLowerBias[i] *= coeff; + _symbolicLowerBias[i] *= weight; _symbolicLowerBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; if ( sourceUb > sourceLb ) @@ -2213,7 +2206,6 @@ void Layer::computeSymbolicBoundsForLeakyRelu() } } - void Layer::computeSymbolicBoundsForSigmoid() { std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); @@ -3301,7 +3293,6 @@ void Layer::computeSymbolicBoundsForWeightedSum() } } - /* We now have the symbolic representation for the current layer. 
Next, we compute new lower and upper bounds for @@ -3371,178 +3362,831 @@ void Layer::computeSymbolicBoundsForWeightedSum() } } -double Layer::softmaxLSELowerBound( const Vector &inputs, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) +void Layer::computeParameterisedSymbolicBounds( double coeff ) { - double sum = 0; - for ( unsigned j = 0; j < inputs.size(); ++j ) + switch ( _type ) { - double lj = inputLbs[j]; - double uj = inputUbs[j]; - double xj = inputs[j]; - sum += - ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); - } + case RELU: + computeParameterisedSymbolicBoundsForRelu( coeff ); + break; - return std::exp( inputs[i] ) / sum; + case SIGN: + computeParameterisedSymbolicBoundsForSign( coeff ); + break; + + case LEAKY_RELU: + computeParameterisedSymbolicBoundsForLeakyRelu( coeff ); + break; + + default: + computeSymbolicBounds(); + break; + } } -double Layer::softmaxdLSELowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) +void Layer::computeParameterisedSymbolicBoundsForRelu( double coeff ) { - double val = 0; - if ( i == di ) - val += softmaxLSELowerBound( inputMids, inputLbs, inputUbs, i ); - - double ldi = inputLbs[di]; - double udi = inputUbs[di]; + ASSERT( coeff >= 0 && coeff <= 1 ); + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); - double sum = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) + for ( unsigned i = 0; i < _size; ++i ) { - double lj = inputLbs[j]; - double uj = inputUbs[j]; - double xj = inputMids[j]; + if ( _eliminatedNeurons.exists( i ) ) + { + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; - sum += - ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + 
_symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; + } } - val -= std::exp( inputMids[i] ) / ( sum * sum ) * ( std::exp( udi ) - std::exp( ldi ) ) / - ( udi - ldi ); + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; - return val; -} + /* + There are two ways we can determine that a ReLU has become fixed: -double Layer::softmaxLSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) -{ - double max = FloatUtils::negativeInfinity(); - unsigned maxInputIndex = 0; - unsigned index = 0; - for ( const auto &mid : inputMids ) - { - if ( mid > max ) - { - max = mid; - maxInputIndex = index; - } - ++index; - } + 1. If the ReLU's variable has been externally fixed + 2. lbLb >= 0 (ACTIVE) or ubUb <= 0 (INACTIVE) + */ + PhaseStatus reluPhase = PHASE_NOT_FIXED; - if ( maxInputIndex == i ) - return softmaxERLowerBound( inputMids, inputLbs, inputUbs, i ); - else - { - double sum = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) - { - if ( j == maxInputIndex ) - sum += 1; - else - { - double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; - double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; - double xjjstar = inputMids[j] - inputMids[maxInputIndex]; + // Has the f variable been eliminated or fixed? 
+ if ( FloatUtils::isPositive( _lb[i] ) ) + reluPhase = RELU_PHASE_ACTIVE; + else if ( FloatUtils::isZero( _ub[i] ) ) + reluPhase = RELU_PHASE_INACTIVE; - sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + - ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); - } - } + ASSERT( _neuronToActivationSources.exists( i ) ); + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); - return std::exp( inputMids[i] - inputMids[maxInputIndex] ) / sum; - } -} + /* + A ReLU initially "inherits" the symbolic bounds computed + for its input variable + */ + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); -double Layer::softmaxdLSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) -{ - double max = FloatUtils::negativeInfinity(); - unsigned maxInputIndex = 0; - unsigned index = 0; - for ( const auto &mid : inputMids ) - { - if ( mid > max ) + for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - max = mid; - maxInputIndex = index; + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; + _symbolicUb[j * _size + i] = + sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; } - ++index; - } + _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; + _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; - if ( maxInputIndex == i ) - return softmaxdERLowerBound( inputMids, inputLbs, inputUbs, i, di ); - else - { - double val = softmaxLSELowerBound2( inputMids, inputLbs, inputUbs, i ); + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); - double sum = 0; - for ( unsigned j = 0; j < 
inputMids.size(); ++j ) + _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); + _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); + _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); + _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); + + // Has the b variable been fixed? + if ( !FloatUtils::isNegative( sourceLb ) ) { - if ( j == maxInputIndex ) - sum += 1; - else - { - double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; - double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; - double xjjstar = inputMids[j] - inputMids[maxInputIndex]; - sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + - ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); - } + reluPhase = RELU_PHASE_ACTIVE; } - double val2 = std::exp( inputMids[i] - inputMids[maxInputIndex] ) / ( sum * sum ); - - if ( i == di ) + else if ( !FloatUtils::isPositive( sourceUb ) ) { - double ldijstar = inputLbs[i] - inputUbs[maxInputIndex]; - double udijstar = inputUbs[i] - inputLbs[maxInputIndex]; - return val - - val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / ( udijstar - ldijstar ); + reluPhase = RELU_PHASE_INACTIVE; } - else if ( maxInputIndex == di ) + + if ( reluPhase == PHASE_NOT_FIXED ) { - double sum2 = 0; - for ( unsigned j = 0; j < inputMids.size(); ++j ) + // If we got here, we know that lbLb < 0 and ubUb + // > 0 There are four possible cases, depending on + // whether ubLb and lbUb are negative or positive + // (see Neurify paper, page 14). 
+ + // Upper bound + if ( _symbolicLbOfUb[i] <= 0 ) { - if ( j == maxInputIndex ) - continue; - else - { - double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; - double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; - sum2 += ( std::exp( ujjstar ) - std::exp( ljjstar ) ) / ( ujjstar - ljjstar ); - } + // lbOfUb[i] < 0 < ubOfUb[i] + // Concretize the upper bound using the Ehler's-like approximation + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _symbolicUb[j * _size + i] = _symbolicUb[j * _size + i] * _symbolicUbOfUb[i] / + ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] ); + + // Do the same for the bias, and then adjust + _symbolicUpperBias[i] = _symbolicUpperBias[i] * _symbolicUbOfUb[i] / + ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] ); + _symbolicUpperBias[i] -= _symbolicLbOfUb[i] * _symbolicUbOfUb[i] / + ( _symbolicUbOfUb[i] - _symbolicLbOfUb[i] ); } - return -val + val2 * sum2; + + // Lower bound: y >= coeff * x (varies continuously between y >= 0 and y >= alpha * x). + if ( _symbolicUbOfLb[i] <= 0 ) + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _symbolicLb[j * _size + i] *= coeff; + + _symbolicLowerBias[i] *= coeff; + } + else + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _symbolicLb[j * _size + i] = _symbolicLb[j * _size + i] * _symbolicUbOfLb[i] / + ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] ); + + _symbolicLowerBias[i] = _symbolicLowerBias[i] * _symbolicUbOfLb[i] / + ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] ); + } + + _symbolicLbOfLb[i] = 0; } else { - double ldijstar = inputLbs[di] - inputUbs[maxInputIndex]; - double udijstar = inputUbs[di] - inputLbs[maxInputIndex]; - return -val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / - ( udijstar - ldijstar ); - } - } -} - -double Layer::softmaxLSEUpperBound( const Vector &inputs, - const Vector &outputLb, - const Vector &outputUb, - unsigned i ) -{ - double li = outputLb[i]; - double ui = outputUb[i]; - + // The phase of this ReLU is fixed! 
+ if ( reluPhase == RELU_PHASE_ACTIVE ) + { + // Active ReLU, bounds are propagated as is + } + else + { + // Inactive ReLU, returns zero + _symbolicLbOfLb[i] = 0; + _symbolicUbOfLb[i] = 0; + _symbolicLbOfUb[i] = 0; + _symbolicUbOfUb[i] = 0; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] = 0; + _symbolicLb[j * _size + i] = 0; + } + + _symbolicLowerBias[i] = 0; + _symbolicUpperBias[i] = 0; + } + } + + if ( _symbolicLbOfUb[i] < 0 ) + _symbolicLbOfUb[i] = 0; + + /* + We now have the tightest bounds we can for the relu + variable. If they are tigheter than what was previously + known, store them. + */ + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + +void Layer::computeParameterisedSymbolicBoundsForSign( double coeff ) +{ + ASSERT( coeff >= 0 && coeff <= 1 ); + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + for ( unsigned i = 0; i < _size; ++i ) + { + // Eliminate neurons are skipped + if ( _eliminatedNeurons.exists( i ) ) + { + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; + + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; + + continue; + } + + /* + There are two ways we can determine that a Sign has become fixed: + + 1. If the Sign's variable has been externally fixed + 2. lbLb >= 0 (Positive) or ubUb < 0 (Negative) + */ + PhaseStatus signPhase = PHASE_NOT_FIXED; + + // Has the f variable been eliminated or fixed? 
+ if ( !FloatUtils::isNegative( _lb[i] ) ) + signPhase = SIGN_PHASE_POSITIVE; + else if ( FloatUtils::isNegative( _ub[i] ) ) + signPhase = SIGN_PHASE_NEGATIVE; + + ASSERT( _neuronToActivationSources.exists( i ) ); + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + + /* + A Sign initially "inherits" the symbolic bounds computed + for its input variable + */ + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; + _symbolicUb[j * _size + i] = + sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; + } + _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; + _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; + + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + + _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); + _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); + _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); + _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); + + // Has the b variable been fixed? 
+ if ( !FloatUtils::isNegative( sourceLb ) ) + { + signPhase = SIGN_PHASE_POSITIVE; + } + else if ( FloatUtils::isNegative( sourceUb ) ) + { + signPhase = SIGN_PHASE_NEGATIVE; + } + + if ( signPhase == PHASE_NOT_FIXED ) + { + PhaseStatus upperSignPhase = PHASE_NOT_FIXED; + PhaseStatus lowerSignPhase = PHASE_NOT_FIXED; + + // If we got here, we know that lbLb < 0 and ubUb + // > 0 + + // Upper bound + if ( !FloatUtils::isNegative( _symbolicLbOfUb[i] ) ) + { + // The upper bound is strictly positive - turns into + // the constant 1 + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _symbolicUb[j * _size + i] = 0; + + _symbolicUpperBias[i] = 1; + + upperSignPhase = SIGN_PHASE_POSITIVE; + } + else + { + // The upper bound's phase is not fixed, use parameterised + // parallelogram approximation: y >= 2 / l * ( 1 - coeff ) x + 1 + // (varies continuously between y <= 1 and y <= -2 / l * x + 1). + double factor = -2.0 / _symbolicLbOfLb[i] * ( 1 - coeff ); + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _symbolicUb[j * _size + i] *= factor; + + // Do the same for the bias, and then adjust + _symbolicUpperBias[i] *= factor; + _symbolicUpperBias[i] += 1; + } + + // Lower bound + if ( FloatUtils::isNegative( _symbolicUbOfLb[i] ) ) + { + // The lower bound is strictly negative - turns into + // the constant -1 + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _symbolicLb[j * _size + i] = 0; + + _symbolicLowerBias[i] = -1; + + lowerSignPhase = SIGN_PHASE_NEGATIVE; + } + else + { + // The lower bound's phase is not fixed, use parameterised + // parallelogram approximation: y >= 2 / u * ( 1 - coeff ) x - 1 + // (varies continuously between y >= -1 and y >= 2 / u * x - 1). 
+ double factor = 2.0 / _symbolicUbOfUb[i] * ( 1 - coeff ); + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] *= factor; + } + + // Do the same for the bias, and then adjust + _symbolicLowerBias[i] *= factor; + _symbolicLowerBias[i] -= 1; + } + + if (upperSignPhase == PHASE_NOT_FIXED) + { + _symbolicUbOfUb[i] = 1; + _symbolicLbOfUb[i] = -1; + } + else + { + _symbolicUbOfUb[i] = 1; + _symbolicLbOfUb[i] = 1; + } + + if (lowerSignPhase == PHASE_NOT_FIXED) + { + _symbolicUbOfLb[i] = 1; + _symbolicLbOfLb[i] = -1; + } + else + { + _symbolicUbOfLb[i] = -1; + _symbolicLbOfLb[i] = -1; + } + } + else + { + // The phase of this Sign is fixed! + double constant = ( signPhase == SIGN_PHASE_POSITIVE ) ? 1 : -1; + + _symbolicLbOfLb[i] = constant; + _symbolicUbOfLb[i] = constant; + _symbolicLbOfUb[i] = constant; + _symbolicUbOfUb[i] = constant; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] = 0; + _symbolicLb[j * _size + i] = 0; + } + + _symbolicLowerBias[i] = constant; + _symbolicUpperBias[i] = constant; + } + + if ( _symbolicLbOfLb[i] < -1 ) + _symbolicLbOfLb[i] = -1; + if ( _symbolicUbOfUb[i] > 1 ) + _symbolicUbOfUb[i] = 1; + + /* + We now have the tightest bounds we can for the sign + variable. If they are tigheter than what was previously + known, store them. 
+ */ + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + +void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ) +{ + ASSERT( _alpha > 0 && _alpha < 1 ); + ASSERT( coeff >= 0 && coeff <= 1 ); + + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + { + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; + + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; + } + } + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + /* + There are two ways we can determine that a LeakyReLU has become fixed: + + 1. If the LeakyReLU's variable has been externally fixed + 2. lbLb >= 0 (ACTIVE) or ubUb <= 0 (INACTIVE) + */ + PhaseStatus leakyReluPhase = PHASE_NOT_FIXED; + + // Has the f variable been eliminated or fixed? 
+ if ( FloatUtils::isPositive( _lb[i] ) ) + leakyReluPhase = RELU_PHASE_ACTIVE; + else if ( FloatUtils::isZero( _ub[i] ) ) + leakyReluPhase = RELU_PHASE_INACTIVE; + + ASSERT( _neuronToActivationSources.exists( i ) ); + NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); + + /* + A LeakyReLU initially "inherits" the symbolic bounds computed + for its input variable + */ + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + sourceIndex._neuron]; + _symbolicUb[j * _size + i] = + sourceSymbolicUb[j * sourceLayerSize + sourceIndex._neuron]; + } + _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[sourceIndex._neuron]; + _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[sourceIndex._neuron]; + + double sourceLb = sourceLayer->getLb( sourceIndex._neuron ); + double sourceUb = sourceLayer->getUb( sourceIndex._neuron ); + + _symbolicLbOfLb[i] = sourceLayer->getSymbolicLbOfLb( sourceIndex._neuron ); + _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); + _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); + _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); + + // Has the b variable been fixed? 
+ if ( !FloatUtils::isNegative( sourceLb ) ) + { + leakyReluPhase = RELU_PHASE_ACTIVE; + } + else if ( !FloatUtils::isPositive( sourceUb ) ) + { + leakyReluPhase = RELU_PHASE_INACTIVE; + } + + if ( leakyReluPhase == PHASE_NOT_FIXED ) + { + // LeakyReLU not fixed + // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) + // Concrete upper bound: x_f <= ub_b + double width = sourceUb - sourceLb; + double weight = ( sourceUb - _alpha * sourceLb ) / width; + + if ( _alpha <= 1 ) + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] *= weight; + } + + // Do the same for the bias, and then adjust + _symbolicUpperBias[i] *= weight; + _symbolicUpperBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; + + + // For the lower bound, in general, x_f >= lambda * x_b, where + // 0 <= lambda <= 1, would be a sound lower bound. We + // use the heuristic described in section 4.1 of + // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + // to set the value of lambda (either 0 or 1 is considered). + if ( sourceUb > sourceLb ) + { + // lambda = 1 + // Symbolic lower bound: x_f >= x_b + // Concrete lower bound: x_f >= sourceLb + + // Lower bounds are passed as is + } + else + { + // lambda = ((1 - alpha) * coeff + alpha) (varies continuously between lambda = alpha and lambda = 1). 
+ // Symbolic lower bound: x_f >= ((1 - coeff) * weight + alpha) x_b + // Concrete lower bound: x_f >= 0 + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] *= ( 1 - _alpha ) * coeff + _alpha; + } + + _symbolicLowerBias[i] *= _alpha; + } + } + else + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] *= weight; + } + + // Do the same for the bias, and then adjust + _symbolicLowerBias[i] *= weight; + _symbolicLowerBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; + + if ( sourceUb > sourceLb ) + { + // Upper bounds are passed as is + } + else + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] *= _alpha; + } + + _symbolicUpperBias[i] *= _alpha; + } + } + + /* + We now have the symbolic representation for the current + layer. Next, we compute new lower and upper bounds for + it. For each of these bounds, we compute an upper bound and + a lower bound. + */ + _symbolicLbOfLb[i] = _symbolicLowerBias[i]; + _symbolicUbOfLb[i] = _symbolicLowerBias[i]; + _symbolicLbOfUb[i] = _symbolicUpperBias[i]; + _symbolicUbOfUb[i] = _symbolicUpperBias[i]; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + double inputLb = _layerOwner->getLayer( 0 )->getLb( j ); + double inputUb = _layerOwner->getLayer( 0 )->getUb( j ); + + double entry = _symbolicLb[j * _size + i]; + + if ( entry >= 0 ) + { + _symbolicLbOfLb[i] += ( entry * inputLb ); + _symbolicUbOfLb[i] += ( entry * inputUb ); + } + else + { + _symbolicLbOfLb[i] += ( entry * inputUb ); + _symbolicUbOfLb[i] += ( entry * inputLb ); + } + + entry = _symbolicUb[j * _size + i]; + + if ( entry >= 0 ) + { + _symbolicLbOfUb[i] += ( entry * inputLb ); + _symbolicUbOfUb[i] += ( entry * inputUb ); + } + else + { + _symbolicLbOfUb[i] += ( entry * inputUb ); + _symbolicUbOfUb[i] += ( entry * inputLb ); + } + } + } + else + { + // The phase of this LeakyReLU is fixed! 
+ if ( leakyReluPhase == RELU_PHASE_ACTIVE ) + { + // Positive LeakyReLU, bounds are propagated as is + } + else + { + // Negative LeakyReLU, bounds are multiplied by _alpha + _symbolicLbOfLb[i] *= _alpha; + _symbolicUbOfLb[i] *= _alpha; + _symbolicLbOfUb[i] *= _alpha; + _symbolicUbOfUb[i] *= _alpha; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] *= _alpha; + _symbolicLb[j * _size + i] *= _alpha; + } + + _symbolicLowerBias[i] *= _alpha; + _symbolicUpperBias[i] *= _alpha; + } + } + + if ( _symbolicUbOfUb[i] > sourceUb ) + _symbolicUbOfUb[i] = sourceUb; + if ( _symbolicLbOfLb[i] < _alpha * sourceLb ) + _symbolicLbOfLb[i] = _alpha * sourceLb; + + /* + We now have the tightest bounds we can for the leakyRelu + variable. If they are tigheter than what was previously + known, store them. + */ + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + +double Layer::softmaxLSELowerBound( const Vector &inputs, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) +{ + double sum = 0; + for ( unsigned j = 0; j < inputs.size(); ++j ) + { + double lj = inputLbs[j]; + double uj = inputUbs[j]; + double xj = inputs[j]; + sum += + ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); + } + + return std::exp( inputs[i] ) / sum; +} + +double Layer::softmaxdLSELowerBound( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) +{ + double val = 0; + if ( i == di ) + val += softmaxLSELowerBound( inputMids, inputLbs, inputUbs, i ); + + double ldi = inputLbs[di]; + double udi = inputUbs[di]; + + double sum = 0; + for ( unsigned j = 0; j < 
inputMids.size(); ++j ) + { + double lj = inputLbs[j]; + double uj = inputUbs[j]; + double xj = inputMids[j]; + + sum += + ( uj - xj ) / ( uj - lj ) * std::exp( lj ) + ( xj - lj ) / ( uj - lj ) * std::exp( uj ); + } + + val -= std::exp( inputMids[i] ) / ( sum * sum ) * ( std::exp( udi ) - std::exp( ldi ) ) / + ( udi - ldi ); + + return val; +} + +double Layer::softmaxLSELowerBound2( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) +{ + double max = FloatUtils::negativeInfinity(); + unsigned maxInputIndex = 0; + unsigned index = 0; + for ( const auto &mid : inputMids ) + { + if ( mid > max ) + { + max = mid; + maxInputIndex = index; + } + ++index; + } + + if ( maxInputIndex == i ) + return softmaxERLowerBound( inputMids, inputLbs, inputUbs, i ); + else + { + double sum = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + if ( j == maxInputIndex ) + sum += 1; + else + { + double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; + double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; + double xjjstar = inputMids[j] - inputMids[maxInputIndex]; + + sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + + ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); + } + } + + return std::exp( inputMids[i] - inputMids[maxInputIndex] ) / sum; + } +} + +double Layer::softmaxdLSELowerBound2( const Vector &inputMids, + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) +{ + double max = FloatUtils::negativeInfinity(); + unsigned maxInputIndex = 0; + unsigned index = 0; + for ( const auto &mid : inputMids ) + { + if ( mid > max ) + { + max = mid; + maxInputIndex = index; + } + ++index; + } + + if ( maxInputIndex == i ) + return softmaxdERLowerBound( inputMids, inputLbs, inputUbs, i, di ); + else + { + double val = softmaxLSELowerBound2( inputMids, inputLbs, inputUbs, i ); + + double sum = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + if ( j == 
maxInputIndex ) + sum += 1; + else + { + double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; + double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; + double xjjstar = inputMids[j] - inputMids[maxInputIndex]; + sum += ( ujjstar - xjjstar ) / ( ujjstar - ljjstar ) * std::exp( ljjstar ) + + ( xjjstar - ljjstar ) / ( ujjstar - ljjstar ) * std::exp( ujjstar ); + } + } + double val2 = std::exp( inputMids[i] - inputMids[maxInputIndex] ) / ( sum * sum ); + + if ( i == di ) + { + double ldijstar = inputLbs[i] - inputUbs[maxInputIndex]; + double udijstar = inputUbs[i] - inputLbs[maxInputIndex]; + return val - + val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / ( udijstar - ldijstar ); + } + else if ( maxInputIndex == di ) + { + double sum2 = 0; + for ( unsigned j = 0; j < inputMids.size(); ++j ) + { + if ( j == maxInputIndex ) + continue; + else + { + double ljjstar = inputLbs[j] - inputUbs[maxInputIndex]; + double ujjstar = inputUbs[j] - inputLbs[maxInputIndex]; + sum2 += ( std::exp( ujjstar ) - std::exp( ljjstar ) ) / ( ujjstar - ljjstar ); + } + } + return -val + val2 * sum2; + } + else + { + double ldijstar = inputLbs[di] - inputUbs[maxInputIndex]; + double udijstar = inputUbs[di] - inputLbs[maxInputIndex]; + return -val2 * ( std::exp( udijstar ) - std::exp( ldijstar ) ) / + ( udijstar - ldijstar ); + } + } +} + +double Layer::softmaxLSEUpperBound( const Vector &inputs, + const Vector &outputLb, + const Vector &outputUb, + unsigned i ) +{ + double li = outputLb[i]; + double ui = outputUb[i]; + Vector inputTilda; SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); @@ -3650,7 +4294,6 @@ double Layer::softmaxdERUpperBound( const Vector &inputMids, double li = outputLb[i]; double ui = outputUb[i]; - if ( i == di ) { double val2 = -1; @@ -3980,7 +4623,6 @@ String Layer::typeToString( Type type ) return "BILINEAR"; break; - default: return "UNKNOWN TYPE"; break; diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h index 4d1990ebc5..965f85ef45 100644 --- 
a/src/nlr/Layer.h +++ b/src/nlr/Layer.h @@ -2,7 +2,7 @@ /*! \file Layer.h ** \verbatim ** Top contributors (to current version): - ** Guy Katz, Ido Shmuel + ** Guy Katz ** This file is part of the Marabou project. ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS ** in the top-level source directory) and their institutional affiliations. @@ -137,6 +137,7 @@ class Layer void obtainCurrentBounds( const Query &inputQuery ); void obtainCurrentBounds(); void computeSymbolicBounds(); + void computeParameterisedSymbolicBounds( double coeff ); void computeIntervalArithmeticBounds(); /* @@ -291,6 +292,14 @@ class Layer void computeSymbolicBoundsForSoftmax(); void computeSymbolicBoundsForBilinear(); void computeSymbolicBoundsDefault(); + + + /* + Helper functions for parameterised symbolic bound tightening + */ + void computeParameterisedSymbolicBoundsForRelu( double coeff ); + void computeParameterisedSymbolicBoundsForSign( double coeff ); + void computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ); /* Helper functions for interval bound tightening diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index 04b9b20683..d41ed22de4 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -220,6 +220,12 @@ void NetworkLevelReasoner::symbolicBoundPropagation() _layerIndexToLayer[i]->computeSymbolicBounds(); } +void NetworkLevelReasoner::parameterisedSymbolicBoundPropagation( Map coeffs ) +{ + for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) + _layerIndexToLayer[i]->computeParameterisedSymbolicBounds( coeffs[i] ); +} + void NetworkLevelReasoner::deepPolyPropagation() { if ( _deepPolyAnalysis == nullptr ) @@ -237,6 +243,12 @@ void NetworkLevelReasoner::lpRelaxationPropagation() Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE ) lpFormulator.optimizeBoundsWithLpRelaxation( _layerIndexToLayer, true ); + else if ( 
Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP ) + lpFormulator.optimizeBoundsWithInvprop( _layerIndexToLayer ); + else if ( Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX ) + lpFormulator.optimizeBoundsWithPreimageApproximation( _layerIndexToLayer ); else if ( Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::LP_RELAXATION ) lpFormulator.optimizeBoundsWithLpRelaxation( _layerIndexToLayer ); diff --git a/src/nlr/NetworkLevelReasoner.h b/src/nlr/NetworkLevelReasoner.h index fa125f0116..f84f0f12ea 100644 --- a/src/nlr/NetworkLevelReasoner.h +++ b/src/nlr/NetworkLevelReasoner.h @@ -24,9 +24,9 @@ #include "MatrixMultiplication.h" #include "NeuronIndex.h" #include "PiecewiseLinearFunctionType.h" +#include "PolygonalTightening.h" #include "Tightening.h" #include "Vector.h" -#include "PolygonalTightening.h" #include @@ -103,6 +103,12 @@ class NetworkLevelReasoner : public LayerOwner expressions and obtain tighter bounds (e.g., if the upper bound on the upper bound of a ReLU node is negative, that ReLU is inactive and its output can be set to 0. + + - Parametrised Symbolic: For certain activation functions, there + is a continuum of valid symbolic bounds. We receive a map of + coefficients in range [0, 1] for every layer index, then compute + the parameterised symbolic bounds (or default to regular + symbolic bounds if parameterised bounds not implemented). 
- LP Relaxation: invoking an LP solver on a series of LP relaxations of the problem we're trying to solve, and @@ -131,6 +137,7 @@ class NetworkLevelReasoner : public LayerOwner void obtainCurrentBounds(); void intervalArithmeticBoundPropagation(); void symbolicBoundPropagation(); + void parameterisedSymbolicBoundPropagation( Map coeffs ); void deepPolyPropagation(); void lpRelaxationPropagation(); void LPTighteningForOneLayer( unsigned targetIndex ); From cd5f2dec2c2a1a7203fcae9e30a1bb443c58fd3b Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 10 Oct 2024 20:55:21 +0300 Subject: [PATCH 19/72] 0-6-6: New constants for PreimageApproximation (random seeds). 0-6-6: New constants for PreimageApproximation (random seeds). --- src/configuration/GlobalConfiguration.cpp | 2 ++ src/configuration/GlobalConfiguration.h | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp index d8529ee7c1..6cb1f6d8de 100644 --- a/src/configuration/GlobalConfiguration.cpp +++ b/src/configuration/GlobalConfiguration.cpp @@ -67,6 +67,8 @@ const unsigned GlobalConfiguration::ROW_BOUND_TIGHTENER_SATURATION_ITERATIONS = const double GlobalConfiguration::COST_FUNCTION_ERROR_THRESHOLD = 0.0000000001; const unsigned GlobalConfiguration::SIMULATION_RANDOM_SEED = 1; +const unsigned GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED = 1; +const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED = 1; const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true; diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h index ca06a28735..e0daaf133f 100644 --- a/src/configuration/GlobalConfiguration.h +++ b/src/configuration/GlobalConfiguration.h @@ -150,6 +150,12 @@ class GlobalConfiguration // Random seed for generating simulation values. 
static const unsigned SIMULATION_RANDOM_SEED; + + // Random seed for EstimateVolume procedure (PreimageApproximation). + static const unsigned VOLUME_ESTIMATION_RANDOM_SEED; + + // Random seed for PreimageApproximation optimization. + static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED; // How often should projected steepest edge reset the reference space? static const unsigned PSE_ITERATIONS_BEFORE_RESET; From d86c0a3d1ae2c65c2e1a0d3ff407ab2cda63e03f Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 10 Oct 2024 20:57:24 +0300 Subject: [PATCH 20/72] 0-6-7: TEMPORARY CHANGE - use boost v1.85 for Differential Evolution algorithm (gradient-free optimization for PreimageApproximation). 0-6-7: TEMPORARY CHANGE - use boost v1.85 for Differential Evolution algorithm (gradient-free optimization for PreimageApproximation). --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 8db91b2abb..f0d8e1a8e1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -82,7 +82,7 @@ include_directories(SYSTEM ${CVC4_DIR} ${CVC4_DIR}/include) # Avoid using deprecated operations add_definitions(-DBOOST_NO_CXX98_FUNCTION_BASE) -set(BOOST_VERSION 1.84.0) +set(BOOST_VERSION 1.85.0) set(BOOST_DIR "${TOOLS_DIR}/boost-${BOOST_VERSION}") if (MSVC) set(BOOST_ROOT "${BOOST_DIR}/win_installed") From 4467e09cf655f448e9410717edcff365a61eac1b Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Mon, 14 Oct 2024 12:14:36 +0300 Subject: [PATCH 21/72] 0-6-8: Revert last change. 0-6-8: Revert last change. 
--- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index f0d8e1a8e1..8db91b2abb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -82,7 +82,7 @@ include_directories(SYSTEM ${CVC4_DIR} ${CVC4_DIR}/include) # Avoid using deprecated operations add_definitions(-DBOOST_NO_CXX98_FUNCTION_BASE) -set(BOOST_VERSION 1.85.0) +set(BOOST_VERSION 1.84.0) set(BOOST_DIR "${TOOLS_DIR}/boost-${BOOST_VERSION}") if (MSVC) set(BOOST_ROOT "${BOOST_DIR}/win_installed") From 88e339a587940d40c8710408a15d7211a2d668e7 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Mon, 14 Oct 2024 12:17:18 +0300 Subject: [PATCH 22/72] 0-6-9: Adapt DifferentialEvolution from Boost v1.85. 0-6-9: Adapt DifferentialEvolution from Boost v1.85. --- src/nlr/DifferentialEvolution.h | 528 ++++++++++++++++++++++++++++++++ 1 file changed, 528 insertions(+) create mode 100644 src/nlr/DifferentialEvolution.h diff --git a/src/nlr/DifferentialEvolution.h b/src/nlr/DifferentialEvolution.h new file mode 100644 index 0000000000..030f338a84 --- /dev/null +++ b/src/nlr/DifferentialEvolution.h @@ -0,0 +1,528 @@ +/* + * Copyright Nick Thompson, 2024 + * Use, modification and distribution are subject to the + * Boost Software License, Version 1.0. 
(See accompanying file + * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) + */ +#ifndef DIFFERENTIAL_EVOLUTION +#define DIFFERENTIAL_EVOLUTION + +#include "FloatUtils.h" + +#include // for std::sort +#include +#include +#include +#include +#include +#include +#include +#include +#include // for std::false_type +#include +#include + +namespace NLR { + + template struct has_resize : std::false_type {}; + + template struct has_resize().resize( size_t{} ) )>> : std::true_type {}; + + template constexpr bool has_resize_v = has_resize::value; + + template + void validate_bounds( ArgumentContainer const &lower_bounds, + ArgumentContainer const &upper_bounds ) + { + std::ostringstream oss; + + if ( lower_bounds.size() == 0 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The dimension of the problem cannot be zero."; + throw std::domain_error( oss.str() ); + } + + if ( upper_bounds.size() != lower_bounds.size() ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": There must be the same number of lower bounds as upper bounds, but given "; + oss << upper_bounds.size() << " upper bounds, and " << lower_bounds.size() << " lower bounds."; + throw std::domain_error( oss.str() ); + } + + for ( size_t i = 0; i < lower_bounds.size(); ++i ) + { + auto lb = lower_bounds[i]; + auto ub = upper_bounds[i]; + if ( lb > ub ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The upper bound must be greater than or equal to the lower bound, but the upper bound is " << ub + << " and the lower is " << lb << "."; + throw std::domain_error( oss.str() ); + } + + if ( !FloatUtils::isFinite( lb ) ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The lower bound must be finite, but got " << lb << "."; + oss << " For infinite bounds, emulate with std::numeric_limits::lower() or use a standard infinite->finite " + "transform."; + throw std::domain_error( oss.str() ); + } + + if ( 
!FloatUtils::isFinite( ub ) ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The upper bound must be finite, but got " << ub << "."; + oss << " For infinite bounds, emulate with std::numeric_limits::max() or use a standard infinite->finite " + "transform."; + throw std::domain_error( oss.str() ); + } + } + } + + template + std::vector random_initial_population( ArgumentContainer const &lower_bounds, + ArgumentContainer const &upper_bounds, + size_t initial_population_size, URBG &&gen ) + { + using Real = typename ArgumentContainer::value_type; + using DimensionlessReal = decltype( Real() / Real() ); + constexpr bool has_resize = has_resize_v; + std::vector population( initial_population_size ); + auto const dimension = lower_bounds.size(); + + for ( size_t i = 0; i < population.size(); ++i ) + { + if constexpr ( has_resize ) + population[i].resize( dimension ); + else + { + // Argument type must be known at compile-time; like std::array: + if ( population[i].size() != dimension ) + { + std::ostringstream oss; + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": For containers which do not have resize, the default size must be the same as the dimension, "; + oss << "but the default container size is " << population[i].size() << " and the dimension of the problem is " + << dimension << "."; + oss << " The function argument container type is " << typeid( ArgumentContainer ).name() << ".\n"; + throw std::runtime_error( oss.str() ); + } + } + } + + // Why don't we provide an option to initialize with (say) a Gaussian distribution? + // > If the optimum's location is fairly well known, + // > a Gaussian distribution may prove somewhat faster, although it + // > may also increase the probability that the population will converge prematurely. + // > In general, uniform distributions are preferred, since they best reflect + // > the lack of knowledge about the optimum's location. 
+ // - Differential Evolution: A Practical Approach to Global Optimization + // That said, scipy uses Latin Hypercube sampling and says self-avoiding sequences are preferable. + // So this is something that could be investigated and potentially improved. + std::uniform_real_distribution dis( DimensionlessReal( 0 ), DimensionlessReal( 1 ) ); + for ( size_t i = 0; i < population.size(); ++i ) + { + for ( size_t j = 0; j < dimension; ++j ) + { + auto const &lb = lower_bounds[j]; + auto const &ub = upper_bounds[j]; + population[i][j] = lb + dis( gen ) * ( ub - lb ); + } + } + + return population; + } + + template + void validate_initial_guess( ArgumentContainer const &initial_guess, + ArgumentContainer const &lower_bounds, + ArgumentContainer const &upper_bounds) + { + std::ostringstream oss; + auto const dimension = lower_bounds.size(); + + if ( initial_guess.size() != dimension ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The initial guess must have the same dimensions as the problem,"; + oss << ", but the problem size is " << dimension << " and the initial guess has " << initial_guess.size() + << " elements."; + throw std::domain_error( oss.str() ); + } + + for ( size_t i = 0; i < dimension; ++i ) + { + auto lb = lower_bounds[i]; + auto ub = upper_bounds[i]; + + if ( !FloatUtils::isFinite( initial_guess[i] ) ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": At index " << i << ", the initial guess is " << initial_guess[i] + << ", make sure all elements of the initial guess are finite."; + throw std::domain_error( oss.str() ); + } + + if ( initial_guess[i] < lb || initial_guess[i] > ub ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": At index " << i << " the initial guess " << initial_guess[i] << " is not in the bounds [" << lb << ", " + << ub << "]."; + throw std::domain_error( oss.str() ); + } + } + } + + // Return indices corresponding to the minimum function values. 
+ template + std::vector best_indices( std::vector const &function_values ) + { + const size_t n = function_values.size(); + std::vector indices( n ); + + for ( size_t i = 0; i < n; ++i ) + { + indices[i] = i; + } + + std::sort( indices.begin(), + indices.end(), + [&]( size_t a, size_t b ) + { + if ( FloatUtils::isNan( function_values[a] ) ) + { + return false; + } + if ( FloatUtils::isNan( function_values[b] ) ) + { + return true; + } + return function_values[a] < function_values[b]; + }); + + return indices; + } + + template + auto weighted_lehmer_mean( RandomAccessContainer const & values, + RandomAccessContainer const & weights) + { + if ( values.size() != weights.size() ) + { + std::ostringstream oss; + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": There must be the same number of weights as values, but got " << values.size() + << " values and " << weights.size() << " weights."; + throw std::logic_error( oss.str() ); + } + + if ( values.size() == 0 ) + { + std::ostringstream oss; + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": There must at least one value provided."; + throw std::logic_error( oss.str() ); + } + + using Real = typename RandomAccessContainer::value_type; + Real numerator = 0; + Real denominator = 0; + + for ( size_t i = 0; i < values.size(); ++i ) + { + if ( weights[i] < 0 || !FloatUtils::isFinite( weights[i] ) ) + { + std::ostringstream oss; + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": All weights must be positive and finite, but got received weight " << weights[i] + << " at index " << i << " of " << weights.size() << "."; + throw std::domain_error( oss.str() ); + } + + Real tmp = weights[i] * values[i]; + numerator += tmp * values[i]; + denominator += tmp; + } + return numerator / denominator; + } + + // Storn, R., Price, K. (1997). Differential evolution-a simple and efficient heuristic for global optimization over + // continuous spaces. 
+ // Journal of global optimization, 11, 341-359. + // See: + // https://www.cp.eng.chula.ac.th/~prabhas//teaching/ec/ec2012/storn_price_de.pdf + + // We provide the parameters in a struct-there are too many of them and they are too unwieldy to pass individually: + template struct differential_evolution_parameters + { + using Real = typename ArgumentContainer::value_type; + using DimensionlessReal = decltype( Real() / Real() ); + ArgumentContainer lower_bounds; + ArgumentContainer upper_bounds; + + // mutation factor is also called scale factor or just F in the literature: + DimensionlessReal mutation_factor = static_cast( 0.65 ); + DimensionlessReal crossover_probability = static_cast( 0.5 ); + + // Population in each generation: + size_t NP = 500; + size_t max_generations = 1000; + ArgumentContainer const *initial_guess = nullptr; + unsigned threads = std::thread::hardware_concurrency(); + }; + + template + void validate_differential_evolution_parameters( differential_evolution_parameters const &de_params ) + { + std::ostringstream oss; + validate_bounds( de_params.lower_bounds, de_params.upper_bounds ); + if ( de_params.NP < 4 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The population size must be at least 4, but requested population size of " << de_params.NP << "."; + throw std::invalid_argument( oss.str() ); + } + + // From: "Differential Evolution: A Practical Approach to Global Optimization (Natural Computing Series)" + // > The scale factor, F in (0,1+), is a positive real number that controls the rate at which the population evolves. + // > While there is no upper limit on F, effective values are seldom greater than 1.0. + // ... + // Also see "Limits on F", Section 2.5.1: + // > This discontinuity at F = 1 reduces the number of mutants by half and can result in erratic convergence... 
+ auto F = de_params.mutation_factor; + if ( FloatUtils::isNan( F ) || F >= 1 || F <= 0 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": F in (0, 1) is required, but got F=" << F << "."; + throw std::domain_error( oss.str() ); + } + + if ( de_params.max_generations < 1 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": There must be at least one generation."; + throw std::invalid_argument( oss.str() ); + } + + if ( de_params.initial_guess ) + { + validate_initial_guess( *de_params.initial_guess, de_params.lower_bounds, de_params.upper_bounds ); + } + + if ( de_params.threads == 0 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": There must be at least one thread."; + throw std::invalid_argument( oss.str() ); + } + } + + template + ArgumentContainer differential_evolution( const Func cost_function, + differential_evolution_parameters const &de_params, + URBG &gen, + std::invoke_result_t target_value + = std::numeric_limits>::quiet_NaN(), + std::atomic *cancellation = nullptr, + std::vector>> *queries = nullptr, + std::atomic> *current_minimum_cost = nullptr) + { + using Real = typename ArgumentContainer::value_type; + using DimensionlessReal = decltype( Real() / Real() ); + using ResultType = std::invoke_result_t; + + validate_differential_evolution_parameters( de_params ); + const size_t dimension = de_params.lower_bounds.size(); + auto NP = de_params.NP; + auto population = random_initial_population( de_params.lower_bounds, de_params.upper_bounds, NP, gen ); + + if ( de_params.initial_guess ) + { + population[0] = *de_params.initial_guess; + } + + std::vector cost( NP, std::numeric_limits::quiet_NaN() ); + std::atomic target_attained = false; + + // This mutex is only used if the queries are stored: + std::mutex mt; + + std::vector thread_pool; + auto const threads = de_params.threads; + for ( size_t j = 0; j < threads; ++j ) + { + // Note that if some members of the population take way 
longer to compute, + // then this parallelization strategy is very suboptimal. + // However, we tried using std::async (which should be robust to this particular problem), + // but the overhead was just totally unacceptable on ARM Macs (the only platform tested). + // As the economists say "there are no solutions, only tradeoffs". + + thread_pool.emplace_back( [&, j]() + { + for ( size_t i = j; i < cost.size(); i += threads ) + { + cost[i] = cost_function( population[i] ); + if ( current_minimum_cost && cost[i] < *current_minimum_cost ) + { + *current_minimum_cost = cost[i]; + } + if ( queries ) + { + std::scoped_lock lock( mt ); + queries->push_back( std::make_pair( population[i], cost[i] ) ); + } + + if ( !FloatUtils::isNan( target_value ) && cost[i] <= target_value ) + { + target_attained = true; + } + } + }); + } + + for ( auto &thread : thread_pool ) + { + thread.join(); + } + + std::vector trial_vectors( NP ); + for ( size_t i = 0; i < NP; ++i ) + { + if constexpr ( has_resize_v ) + { + trial_vectors[i].resize( dimension ); + } + } + + std::vector thread_generators( threads ); + for ( size_t j = 0; j < threads; ++j ) + { + thread_generators[j].seed( gen() ); + } + + // std::vector isn't threadsafe! 
+ std::vector updated_indices( NP, 0 ); + + for ( size_t generation = 0; generation < de_params.max_generations; ++generation ) + { + if ( cancellation && *cancellation ) + { + break; + } + if ( target_attained ) + { + break; + } + + thread_pool.resize( 0 ); + for ( size_t j = 0; j < threads; ++j ) + { + thread_pool.emplace_back( [&, j]() + { + auto& tlg = thread_generators[j]; + std::uniform_real_distribution unif01( DimensionlessReal( 0 ), DimensionlessReal( 1 ) ); + + for ( size_t i = j; i < cost.size(); i += threads ) + { + if ( target_attained ) + { + return; + } + if ( cancellation && *cancellation ) + { + return; + } + + size_t r1, r2, r3; + do + { + r1 = tlg() % NP; + } while ( r1 == i ); + + do + { + r2 = tlg() % NP; + } while ( r2 == i || r2 == r1 ); + + do + { + r3 = tlg() % NP; + } while ( r3 == i || r3 == r2 || r3 == r1 ); + + + for ( size_t k = 0; k < dimension; ++k ) + { + // See equation (4) of the reference: + auto guaranteed_changed_idx = tlg() % dimension; + if ( unif01( tlg ) < de_params.crossover_probability || k == guaranteed_changed_idx ) + { + auto tmp = population[r1][k] + de_params.mutation_factor * ( population[r2][k] - population[r3][k] ); + auto const &lb = de_params.lower_bounds[k]; + auto const &ub = de_params.upper_bounds[k]; + // Some others recommend regenerating the indices rather than clamping; + // I dunno seems like it could get stuck regenerating . . . 
+ trial_vectors[i][k] = std::clamp( tmp, lb, ub ); + } + else + { + trial_vectors[i][k] = population[i][k]; + } + } + + auto const trial_cost = cost_function( trial_vectors[i] ); + if ( FloatUtils::isNan( trial_cost ) ) + { + continue; + } + if ( queries ) + { + std::scoped_lock lock( mt ); + queries->push_back( std::make_pair( trial_vectors[i], trial_cost ) ); + } + + if ( trial_cost < cost[i] || FloatUtils::isNan( cost[i] ) ) + { + cost[i] = trial_cost; + if ( !FloatUtils::isNan( target_value ) && cost[i] <= target_value ) + { + target_attained = true; + } + + if ( current_minimum_cost && cost[i] < *current_minimum_cost ) + { + *current_minimum_cost = cost[i]; + } + + // Can't do this! It's a race condition! + //population[i] = trial_vectors[i]; + // Instead mark all the indices that need to be updated: + updated_indices[i] = 1; + } + } + }); + } + for ( auto &thread : thread_pool ) + { + thread.join(); + } + + for ( size_t i = 0; i < NP; ++i ) + { + if ( updated_indices[i] ) + { + population[i] = trial_vectors[i]; + updated_indices[i] = 0; + } + } + } + + auto it = std::min_element( cost.begin(), cost.end() ); + return population[std::distance( cost.begin(), it )]; + } + +} // namespace NLR +#endif From 8031a41a30eb232f16719b3aff277b568afa429c Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Mon, 14 Oct 2024 12:19:19 +0300 Subject: [PATCH 23/72] 0-6-10: Current state of PreimageApproximation (EstimateVolume implemented). 0-6-10: Current state of PreimageApproximation (EstimateVolume implemented). 
--- src/nlr/LPFormulator.cpp | 67 ++++++++++++++++++++++++++++++++++------ src/nlr/LPFormulator.h | 2 +- src/nlr/Layer.cpp | 41 ++++++++++++++++++++++-- src/nlr/Layer.h | 3 +- 4 files changed, 99 insertions(+), 14 deletions(-) diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp index 6876586a57..6b7b39c929 100644 --- a/src/nlr/LPFormulator.cpp +++ b/src/nlr/LPFormulator.cpp @@ -339,12 +339,7 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( const Map void LPFormulator::optimizeBoundsWithInvprop( const Map &layers ) { - Map coeffs; - for ( const auto &pair : layers ) - { - //coeffs.insert( pair.first, GlobalConfiguration::INVPROP_INITIAL_ALPHA ); - coeffs.insert( pair.first, 0.5 ); - } + } void LPFormulator::optimizeBoundsWithPreimageApproximation( const Map &layers ) @@ -353,7 +348,7 @@ void LPFormulator::optimizeBoundsWithPreimageApproximation( const Map>(); + auto opt_params = differential_evolution_parameters>(); unsigned depth = layers.size(); opt_params.lower_bounds.resize( depth, 0 ); opt_params.upper_bounds.resize( depth, 1 ); @@ -361,14 +356,66 @@ void LPFormulator::optimizeBoundsWithPreimageApproximation( const MapcomputeParameterisedSymbolicBounds( optimal_coeffs[i], true ); + optimizeBoundsWithLpRelaxation( layers, true, optimal_coeffs ); } double LPFormulator::EstimateVolume( const Map &layers, std::vector coeffs ) { - return 0; + // First, run parameterised symbolic bound propagation. + for ( unsigned i = 0; i < layers.size(); ++i ) + layers[i]->computeParameterisedSymbolicBounds( coeffs[i] ); + + std::mt19937_64 rng( GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED ); + double average = 0; + for ( unsigned i = 0; i < GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; ++i ) + { + + // Sample point from known bounds. 
+ Map point; + for ( auto pair : layers ) + { + unsigned layerIndex = pair.first; + const Layer *layer = pair.second; + for ( unsigned index = 0; index < layer->getSize(); ++index ) + { + if ( layer->neuronEliminated( index ) ) + continue; + + const NeuronIndex neuron( layerIndex, index ); + double lb = layer->getLb( index ); + double ub = layer->getUb( index ); + std::uniform_real_distribution<> dis( lb, ub ); + point.insert( neuron , dis( rng ) ); + } + } + + // Compute the average sigmoid of log-sum-exp of points' difference from bounding hyperplanes. + double sum_exp = 0; + for ( auto pair : layers ) + { + const Layer *layer = pair.second; + for ( unsigned index = 0; index < layer->getSize(); ++index ) + { + if ( layer->neuronEliminated( index ) ) + continue; + + double margin = layer->calculateDifferenceFromSymbolic( point, index ); + if ( FloatUtils::wellFormed( std::exp( margin ) ) ) + sum_exp += std::exp( margin ); + } + } + + double slse = 1 / ( 1 + ( 1 / sum_exp ) ); + average += slse / GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; + } + + return average; } void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map &layers, diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index 4184839d9c..64fd022fc4 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -16,6 +16,7 @@ #ifndef __LPFormulator_h__ #define __LPFormulator_h__ +#include "DifferentialEvolution.h" #include "GurobiWrapper.h" #include "LayerOwner.h" #include "Map.h" @@ -26,7 +27,6 @@ #include #include #include -#include #include #include diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index 6fdb081cba..644c056f8d 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -305,7 +305,7 @@ void Layer::computeSimulations() unsigned sourceSize = sourceLayerEntry.second; const double *weights = _layerToWeights[sourceLayerEntry.first]; - for ( unsigned i = 0; i < _size; i++ ) + for ( unsigned i = 0; i < _size; ++i ) { for ( unsigned j = 0; j < simulationSize; 
++j ) _simulations[i][j] = _bias[i]; @@ -3362,7 +3362,23 @@ void Layer::computeSymbolicBoundsForWeightedSum() } } -void Layer::computeParameterisedSymbolicBounds( double coeff ) +double Layer::calculateDifferenceFromSymbolic( Map &point, unsigned i ) const +{ + double lower_sum = _symbolicLowerBias[i]; + double upper_sum = _symbolicUpperBias[i]; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + const NeuronIndex neuron( 0, j ); + lower_sum += _symbolicLb[j * _size + i] * point[neuron]; + upper_sum += _symbolicUb[j * _size + i] * point[neuron]; + } + + const NeuronIndex currentNeuron( _layerIndex, i ); + return FloatUtils::max( point[currentNeuron] - upper_sum , 0 ) + FloatUtils::max( lower_sum - point[currentNeuron] , 0 ); +} + +void Layer::computeParameterisedSymbolicBounds( double coeff, bool receive ) { switch ( _type ) { @@ -3382,6 +3398,27 @@ void Layer::computeParameterisedSymbolicBounds( double coeff ) computeSymbolicBounds(); break; } + + if ( receive ) + { + for ( unsigned i = 0; i < _size; ++i ) + { + Map lower_polygonal_tightening; + Map upper_polygonal_tightening; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + const NeuronIndex neuron( 0, j ); + lower_polygonal_tightening.insert( neuron, _symbolicLb[j * _size + i] ); + upper_polygonal_tightening.insert( neuron, _symbolicUb[j * _size + i] ); + } + + _layerOwner->receivePolygonalTighterBound( + PolygonalTightening( lower_polygonal_tightening, _symbolicLowerBias[i], PolygonalTightening::LB ) ); + _layerOwner->receivePolygonalTighterBound( + PolygonalTightening( upper_polygonal_tightening, _symbolicUpperBias[i], PolygonalTightening::UB ) ); + } + } } void Layer::computeParameterisedSymbolicBoundsForRelu( double coeff ) diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h index 965f85ef45..aa318e9181 100644 --- a/src/nlr/Layer.h +++ b/src/nlr/Layer.h @@ -137,7 +137,8 @@ class Layer void obtainCurrentBounds( const Query &inputQuery ); void obtainCurrentBounds(); void computeSymbolicBounds(); - 
void computeParameterisedSymbolicBounds( double coeff ); + void computeParameterisedSymbolicBounds( double coeff, bool receive = false ); + double calculateDifferenceFromSymbolic( Map &point, unsigned i ) const; void computeIntervalArithmeticBounds(); /* From dd584e7adb57e0512c3f640459b2924814aa809d Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 24 Oct 2024 20:12:00 +0300 Subject: [PATCH 24/72] 0-6-11: Add constants for PreimageApproximation. 0-6-11: Add constants for PreimageApproximation. --- src/configuration/GlobalConfiguration.cpp | 5 ++++- src/configuration/GlobalConfiguration.h | 9 +++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp index 6cb1f6d8de..1a7d133f40 100644 --- a/src/configuration/GlobalConfiguration.cpp +++ b/src/configuration/GlobalConfiguration.cpp @@ -69,6 +69,9 @@ const double GlobalConfiguration::COST_FUNCTION_ERROR_THRESHOLD = 0.0000000001; const unsigned GlobalConfiguration::SIMULATION_RANDOM_SEED = 1; const unsigned GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED = 1; const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED = 1; +const unsigned GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS = 1000; +const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS = 1000; +const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE = 0.1; const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true; @@ -83,7 +86,7 @@ const bool GlobalConfiguration::PL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCES const bool GlobalConfiguration::NL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING = true; const double GlobalConfiguration::PREPROCESSOR_ALMOST_FIXED_THRESHOLD = 0.00001; -const unsigned GlobalConfiguration::PREPROCESSSING_MAX_TIGHTEING_ROUND = 1000; +const unsigned GlobalConfiguration::PREPROCESSSING_MAX_TIGHTEING_ROUND = 10; const bool 
GlobalConfiguration::WARM_START = false; diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h index e0daaf133f..a779b93937 100644 --- a/src/configuration/GlobalConfiguration.h +++ b/src/configuration/GlobalConfiguration.h @@ -154,8 +154,17 @@ class GlobalConfiguration // Random seed for EstimateVolume procedure (PreimageApproximation). static const unsigned VOLUME_ESTIMATION_RANDOM_SEED; + // Number of iterations for EstimateVolume procedure (PreimageApproximation). + static const unsigned VOLUME_ESTIMATION_ITERATIONS; + // Random seed for PreimageApproximation optimization. static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED; + + // Maximum iterations for PreimageApproximation optimization. + static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; + + // Step size for PreimageApproximation optimization. + static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; // How often should projected steepest edge reset the reference space? static const unsigned PSE_ITERATIONS_BEFORE_RESET; From cd52524f05f6be9fa3f65d2cef029143db42086c Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 24 Oct 2024 20:14:20 +0300 Subject: [PATCH 25/72] 0-6-12: Current state of PreimageApproximation (implement coordinate descent). 0-6-12: Current state of PreimageApproximation (implement coordinate descent). --- src/nlr/CoordinateDescent.h | 276 ++++++++++++++++++++++++++++++++++++ src/nlr/LPFormulator.cpp | 5 +- src/nlr/LPFormulator.h | 2 +- 3 files changed, 279 insertions(+), 4 deletions(-) create mode 100644 src/nlr/CoordinateDescent.h diff --git a/src/nlr/CoordinateDescent.h b/src/nlr/CoordinateDescent.h new file mode 100644 index 0000000000..dd9d1c0fd1 --- /dev/null +++ b/src/nlr/CoordinateDescent.h @@ -0,0 +1,276 @@ +/* + * Copyright Nick Thompson, 2024 + * Use, modification and distribution are subject to the + * Boost Software License, Version 1.0. 
(See accompanying file + * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) + */ +#ifndef COORDINATE_DESCENT +#define COORDINATE_DESCENT + +#include "FloatUtils.h" +#include "GlobalConfiguration.h" + +#include // for std::sort +#include +#include +#include +#include +#include +#include +#include // for std::false_type +#include +#include + +namespace NLR { + + template struct has_resize : std::false_type {}; + + template struct has_resize().resize( size_t{} ) )>> : std::true_type {}; + + template constexpr bool has_resize_v = has_resize::value; + + template + void validate_bounds( ArgumentContainer const &lower_bounds, + ArgumentContainer const &upper_bounds ) + { + std::ostringstream oss; + + if ( lower_bounds.size() == 0 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The dimension of the problem cannot be zero."; + throw std::domain_error( oss.str() ); + } + + if ( upper_bounds.size() != lower_bounds.size() ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": There must be the same number of lower bounds as upper bounds, but given "; + oss << upper_bounds.size() << " upper bounds, and " << lower_bounds.size() << " lower bounds."; + throw std::domain_error( oss.str() ); + } + + for ( size_t i = 0; i < lower_bounds.size(); ++i ) + { + auto lb = lower_bounds[i]; + auto ub = upper_bounds[i]; + if ( lb > ub ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The upper bound must be greater than or equal to the lower bound, but the upper bound is " << ub + << " and the lower is " << lb << "."; + throw std::domain_error( oss.str() ); + } + + if ( !FloatUtils::isFinite( lb ) ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The lower bound must be finite, but got " << lb << "."; + oss << " For infinite bounds, emulate with std::numeric_limits::lower() or use a standard infinite->finite " + "transform."; + throw std::domain_error( oss.str() ); + } + + if ( 
!FloatUtils::isFinite( ub ) ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The upper bound must be finite, but got " << ub << "."; + oss << " For infinite bounds, emulate with std::numeric_limits::max() or use a standard infinite->finite " + "transform."; + throw std::domain_error( oss.str() ); + } + } + } + + template + std::vector random_initial_population( ArgumentContainer const &lower_bounds, + ArgumentContainer const &upper_bounds, + size_t initial_population_size, URBG &&gen ) + { + using Real = typename ArgumentContainer::value_type; + using DimensionlessReal = decltype( Real() / Real() ); + constexpr bool has_resize = has_resize_v; + std::vector population( initial_population_size ); + auto const dimension = lower_bounds.size(); + + for ( size_t i = 0; i < population.size(); ++i ) + { + if constexpr ( has_resize ) + population[i].resize( dimension ); + else + { + // Argument type must be known at compile-time; like std::array: + if ( population[i].size() != dimension ) + { + std::ostringstream oss; + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": For containers which do not have resize, the default size must be the same as the dimension, "; + oss << "but the default container size is " << population[i].size() << " and the dimension of the problem is " + << dimension << "."; + oss << " The function argument container type is " << typeid( ArgumentContainer ).name() << ".\n"; + throw std::runtime_error( oss.str() ); + } + } + } + + // Why don't we provide an option to initialize with (say) a Gaussian distribution? + // > If the optimum's location is fairly well known, + // > a Gaussian distribution may prove somewhat faster, although it + // > may also increase the probability that the population will converge prematurely. + // > In general, uniform distributions are preferred, since they best reflect + // > the lack of knowledge about the optimum's location. 
+ // - Differential Evolution: A Practical Approach to Global Optimization + // That said, scipy uses Latin Hypercube sampling and says self-avoiding sequences are preferable. + // So this is something that could be investigated and potentially improved. + std::uniform_real_distribution dis( DimensionlessReal( 0 ), DimensionlessReal( 1 ) ); + for ( size_t i = 0; i < population.size(); ++i ) + { + for ( size_t j = 0; j < dimension; ++j ) + { + auto const &lb = lower_bounds[j]; + auto const &ub = upper_bounds[j]; + population[i][j] = lb + dis( gen ) * ( ub - lb ); + } + } + + return population; + } + + template + void validate_initial_guess( ArgumentContainer const &initial_guess, + ArgumentContainer const &lower_bounds, + ArgumentContainer const &upper_bounds) + { + std::ostringstream oss; + auto const dimension = lower_bounds.size(); + + if ( initial_guess.size() != dimension ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The initial guess must have the same dimensions as the problem,"; + oss << ", but the problem size is " << dimension << " and the initial guess has " << initial_guess.size() + << " elements."; + throw std::domain_error( oss.str() ); + } + + for ( size_t i = 0; i < dimension; ++i ) + { + auto lb = lower_bounds[i]; + auto ub = upper_bounds[i]; + + if ( !FloatUtils::isFinite( initial_guess[i] ) ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": At index " << i << ", the initial guess is " << initial_guess[i] + << ", make sure all elements of the initial guess are finite."; + throw std::domain_error( oss.str() ); + } + + if ( initial_guess[i] < lb || initial_guess[i] > ub ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": At index " << i << " the initial guess " << initial_guess[i] << " is not in the bounds [" << lb << ", " + << ub << "]."; + throw std::domain_error( oss.str() ); + } + } + } + + // Single-Threaded Coordinate Descent + + // We provide the parameters in a 
struct-there are too many of them and they are too unwieldy to pass individually: + template struct coordinate_descent_parameters + { + using Real = typename ArgumentContainer::value_type; + ArgumentContainer lower_bounds; + ArgumentContainer upper_bounds; + + Real step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; + size_t max_iterations = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; + ArgumentContainer const *initial_guess = nullptr; + }; + + template + void validate_coordinate_descent_parameters( coordinate_descent_parameters const &cd_params ) + { + std::ostringstream oss; + validate_bounds( cd_params.lower_bounds, cd_params.upper_bounds ); + + if ( FloatUtils::isNan( cd_params.step_size ) || cd_params.step_size <= 0 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": step_size > 0 is required, but got step_size=" << cd_params.step_size << "."; + throw std::domain_error( oss.str() ); + } + + if ( cd_params.max_iterations < 1 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": There must be at least one generation."; + throw std::invalid_argument( oss.str() ); + } + + if ( cd_params.initial_guess ) + { + validate_initial_guess( *cd_params.initial_guess, cd_params.lower_bounds, cd_params.upper_bounds ); + } + } + + template + ArgumentContainer coordinate_descent( const Func cost_function, + coordinate_descent_parameters const &cd_params, + URBG &gen, + std::invoke_result_t target_value, + bool *cancellation = nullptr, + std::vector>> *queries = nullptr, + std::invoke_result_t *current_minimum_cost = nullptr) + { + using ResultType = std::invoke_result_t; + + validate_coordinate_descent_parameters( cd_params ); + const size_t dimension = cd_params.lower_bounds.size(); + const size_t candidates_number = 2 * dimension; + auto step_size = cd_params.step_size; + auto max_iterations = cd_params.max_iterations; + auto guess = random_initial_population( 
cd_params.lower_bounds, cd_params.upper_bounds, 1, gen ); + + if ( cd_params.initial_guess ) + { + guess[0] = *cd_params.initial_guess; + } + + std::vector candidates( candidates_number ); + std::vector costs( candidates_number ); + ArgumentContainer current_minimum = guess[0]; + + for ( size_t i = 0; i < max_iterations; ++i ) + { + for ( size_t j = 0; j < candidates_number; ++j) + { + candidates[j] = guess[0]; + size_t index = j / 2; + size_t sign = ( j % 2 == 0 ? 1 : -1 ); + candidates[j][index] += sign * step_size; + + costs[j] = cost_function( candidates[j] ); + + if ( current_minimum_cost && costs[j] < *current_minimum_cost ) + { + *current_minimum_cost = costs[j]; + current_minimum = candidates[j]; + } + + if ( !FloatUtils::isNan( target_value ) && costs[j] <= target_value ) + { + break; + } + } + + guess[0] = current_minimum; + } + + return guess[0]; + } + +} // namespace NLR +#endif diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp index 6b7b39c929..2189b563ca 100644 --- a/src/nlr/LPFormulator.cpp +++ b/src/nlr/LPFormulator.cpp @@ -348,16 +348,15 @@ void LPFormulator::optimizeBoundsWithPreimageApproximation( const Map>(); + auto opt_params = coordinate_descent_parameters>(); unsigned depth = layers.size(); opt_params.lower_bounds.resize( depth, 0 ); opt_params.upper_bounds.resize( depth, 1 ); - opt_params.threads = 1; double value_to_reach = 0; std::mt19937_64 rng( GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED ); // Find optimal coeffs and run final parameterised symbolic bound tightening + backward LP relaxation. 
- auto optimal_coeffs = differential_evolution( EstimateVolumeBind, opt_params, rng, value_to_reach ); + auto optimal_coeffs = coordinate_descent( EstimateVolumeBind, opt_params, rng, value_to_reach ); for ( unsigned i = 0; i < layers.size(); ++i ) layers[i]->computeParameterisedSymbolicBounds( optimal_coeffs[i], true ); diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index 64fd022fc4..41f5ac1a3f 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -16,7 +16,7 @@ #ifndef __LPFormulator_h__ #define __LPFormulator_h__ -#include "DifferentialEvolution.h" +#include "CoordinateDescent.h" #include "GurobiWrapper.h" #include "LayerOwner.h" #include "Map.h" From b71935947851c97babcbdcddda20458709c91b26 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 24 Oct 2024 20:16:45 +0300 Subject: [PATCH 26/72] 0-6-13: Timing PreimageApproximation with coordinate descent (very slow). 0-6-13: Timing PreimageApproximation with coordinate descent (very slow). --- src/nlr/tests/Test_NetworkLevelReasoner.h | 128 ++++++++++++++++++++++ 1 file changed, 128 insertions(+) diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index bc4d620609..0c16ae1601 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -12177,6 +12177,134 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } + void test_preimage_approximation_leaky_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke 
DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( + { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List polygonal_bounds; + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintPolygonalTightenings( polygonal_bounds ) ); + for ( auto &bound : polygonal_bounds ) + { + bound.dump(); + } + /*TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + 
tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( + { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( + { Tightening( 4, -0.5, Tightening::LB ), + Tightening( 5, -0.4, Tightening::LB ), + + Tightening( 8, -0.4571, Tightening::LB ), + 
Tightening( 9, -1.1057, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );*/ + } + bool boundsEqual( const List &bounds, const List &expectedBounds ) { if ( bounds.size() != expectedBounds.size() ) From f29ccf08e8f367405d1fc590ed50b3a55ada9580 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Wed, 6 Nov 2024 11:19:24 +0200 Subject: [PATCH 27/72] 0-6-14: Small Fix (same as 0-5---4). 0-6-14: Small Fix (same as 0-5---4). --- src/engine/Engine.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/engine/Engine.cpp b/src/engine/Engine.cpp index d41ac92dd7..534ca5f9b9 100644 --- a/src/engine/Engine.cpp +++ b/src/engine/Engine.cpp @@ -1579,7 +1579,9 @@ void Engine::performMILPSolverBoundedTightening( Query *inputQuery ) // TODO: Remove this block after getting ready to support sigmoid with MILP Bound // Tightening. - if ( _milpSolverBoundTighteningType != MILPSolverBoundTighteningType::NONE && + if ( ( _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::MILP_ENCODING || + _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::MILP_ENCODING_INCREMENTAL || + _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION ) && _preprocessedQuery->getNonlinearConstraints().size() > 0 ) throw MarabouError( MarabouError::FEATURE_NOT_YET_SUPPORTED, "Marabou doesn't support sigmoid with MILP Bound Tightening" ); From 0069af2f3995161d6772316aacb54513ccf02a04 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 12 Dec 2024 20:50:22 +0200 Subject: [PATCH 28/72] 0-6--14: Current progress in PreimageApproximation (0-5--6 fixes, optimization algorithms, parameters, clang-format,) - Apply 0-5--6 fixes (Backward-Forward Algorithm branch) to this branch. - Add new optimization algorithms for PreimageApproximation: Coordinate Descent, Gradient Descent, Adam Optimizer. 
- Change default algorithm (Adam) + parameters to reduce runtime. - Apply clang-format to all files changed. --- src/nlr/AdamOptimizer.h | 354 + src/nlr/CoordinateDescent.h | 426 +- src/nlr/DifferentialEvolution.h | 875 +- src/nlr/Engine.cpp | 3813 +++++++ src/nlr/GlobalConfiguration.cpp | 238 + src/nlr/GlobalConfiguration.h | 315 + src/nlr/GradientDescent.h | 315 + src/nlr/LPFormulator.cpp | 336 +- src/nlr/LPFormulator.h | 81 +- src/nlr/Layer.cpp | 567 +- src/nlr/Layer.h | 22 +- src/nlr/LayerOwner.h | 2 +- src/nlr/MILPSolverBoundTighteningType.h | 46 + src/nlr/NetworkLevelReasoner.cpp | 3 +- src/nlr/NetworkLevelReasoner.h | 8 +- src/nlr/PolygonalTightening.h | 100 + src/nlr/Test_NetworkLevelReasoner.h | 12348 ++++++++++++++++++++++ 17 files changed, 18755 insertions(+), 1094 deletions(-) create mode 100644 src/nlr/AdamOptimizer.h create mode 100644 src/nlr/Engine.cpp create mode 100644 src/nlr/GlobalConfiguration.cpp create mode 100644 src/nlr/GlobalConfiguration.h create mode 100644 src/nlr/GradientDescent.h create mode 100644 src/nlr/MILPSolverBoundTighteningType.h create mode 100644 src/nlr/PolygonalTightening.h create mode 100644 src/nlr/Test_NetworkLevelReasoner.h diff --git a/src/nlr/AdamOptimizer.h b/src/nlr/AdamOptimizer.h new file mode 100644 index 0000000000..7d97046329 --- /dev/null +++ b/src/nlr/AdamOptimizer.h @@ -0,0 +1,354 @@ +/* + * Copyright Nick Thompson, 2024 + * Use, modification and distribution are subject to the + * Boost Software License, Version 1.0. 
(See accompanying file + * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) + */ +#ifndef ADAM_OPTIMIZER +#define ADAM_OPTIMIZER + +#include "FloatUtils.h" +#include "GlobalConfiguration.h" + +#include // for std::sort +#include +#include +#include +#include +#include +#include +#include // for std::false_type +#include +#include + +namespace NLR { + +template struct has_resize : std::false_type +{ +}; + +template +struct has_resize().resize( size_t{} ) )>> : std::true_type +{ +}; + +template constexpr bool has_resize_v = has_resize::value; + +template +void validate_bounds( ArgumentContainer const &lower_bounds, ArgumentContainer const &upper_bounds ) +{ + std::ostringstream oss; + + if ( lower_bounds.size() == 0 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The dimension of the problem cannot be zero."; + throw std::domain_error( oss.str() ); + } + + if ( upper_bounds.size() != lower_bounds.size() ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": There must be the same number of lower bounds as upper bounds, but given "; + oss << upper_bounds.size() << " upper bounds, and " << lower_bounds.size() + << " lower bounds."; + throw std::domain_error( oss.str() ); + } + + for ( size_t i = 0; i < lower_bounds.size(); ++i ) + { + auto lb = lower_bounds[i]; + auto ub = upper_bounds[i]; + if ( lb > ub ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The upper bound must be greater than or equal to the lower bound, but the " + "upper bound is " + << ub << " and the lower is " << lb << "."; + throw std::domain_error( oss.str() ); + } + + if ( !FloatUtils::isFinite( lb ) ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The lower bound must be finite, but got " << lb << "."; + oss << " For infinite bounds, emulate with std::numeric_limits::lower() or use a " + "standard infinite->finite " + "transform."; + throw std::domain_error( oss.str() ); + } + + if ( 
!FloatUtils::isFinite( ub ) ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The upper bound must be finite, but got " << ub << "."; + oss << " For infinite bounds, emulate with std::numeric_limits::max() or use a " + "standard infinite->finite " + "transform."; + throw std::domain_error( oss.str() ); + } + } +} + +template +std::vector random_initial_population( ArgumentContainer const &lower_bounds, + ArgumentContainer const &upper_bounds, + size_t initial_population_size, + URBG &&gen ) +{ + using Real = typename ArgumentContainer::value_type; + using DimensionlessReal = decltype( Real() / Real() ); + constexpr bool has_resize = has_resize_v; + std::vector population( initial_population_size ); + auto const dimension = lower_bounds.size(); + + for ( size_t i = 0; i < population.size(); ++i ) + { + if constexpr ( has_resize ) + population[i].resize( dimension ); + else + { + // Argument type must be known at compile-time; like std::array: + if ( population[i].size() != dimension ) + { + std::ostringstream oss; + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": For containers which do not have resize, the default size must be the " + "same as the dimension, "; + oss << "but the default container size is " << population[i].size() + << " and the dimension of the problem is " << dimension << "."; + oss << " The function argument container type is " + << typeid( ArgumentContainer ).name() << ".\n"; + throw std::runtime_error( oss.str() ); + } + } + } + + // Why don't we provide an option to initialize with (say) a Gaussian distribution? + // > If the optimum's location is fairly well known, + // > a Gaussian distribution may prove somewhat faster, although it + // > may also increase the probability that the population will converge prematurely. + // > In general, uniform distributions are preferred, since they best reflect + // > the lack of knowledge about the optimum's location. 
+ // - Differential Evolution: A Practical Approach to Global Optimization + // That said, scipy uses Latin Hypercube sampling and says self-avoiding sequences are + // preferable. So this is something that could be investigated and potentially improved. + std::uniform_real_distribution dis( DimensionlessReal( 0 ), + DimensionlessReal( 1 ) ); + for ( size_t i = 0; i < population.size(); ++i ) + { + for ( size_t j = 0; j < dimension; ++j ) + { + auto const &lb = lower_bounds[j]; + auto const &ub = upper_bounds[j]; + population[i][j] = lb + dis( gen ) * ( ub - lb ); + } + } + + return population; +} + +template +void validate_initial_guess( ArgumentContainer const &initial_guess, + ArgumentContainer const &lower_bounds, + ArgumentContainer const &upper_bounds ) +{ + std::ostringstream oss; + auto const dimension = lower_bounds.size(); + + if ( initial_guess.size() != dimension ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The initial guess must have the same dimensions as the problem,"; + oss << ", but the problem size is " << dimension << " and the initial guess has " + << initial_guess.size() << " elements."; + throw std::domain_error( oss.str() ); + } + + for ( size_t i = 0; i < dimension; ++i ) + { + auto lb = lower_bounds[i]; + auto ub = upper_bounds[i]; + + if ( !FloatUtils::isFinite( initial_guess[i] ) ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": At index " << i << ", the initial guess is " << initial_guess[i] + << ", make sure all elements of the initial guess are finite."; + throw std::domain_error( oss.str() ); + } + + if ( initial_guess[i] < lb || initial_guess[i] > ub ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": At index " << i << " the initial guess " << initial_guess[i] + << " is not in the bounds [" << lb << ", " << ub << "]."; + throw std::domain_error( oss.str() ); + } + } +} + +// Single-Threaded Adam Optimizer + +// We provide the parameters in a struct-there 
are too many of them and they are too unwieldy to +// pass individually: +template struct adam_optimizer_parameters +{ + using Real = typename ArgumentContainer::value_type; + ArgumentContainer lower_bounds; + ArgumentContainer upper_bounds; + + Real step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; + Real lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; + Real lr = 0.05; + Real epsilon = GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS; + Real beta1 = 0.9; + Real beta2 = 0.999; + Real weight_decay = 0.2; + bool maximize = false; + size_t max_iterations = 100; + ArgumentContainer const *initial_guess = nullptr; +}; + +template +void validate_adam_optimizer_parameters( + adam_optimizer_parameters const &cd_params ) +{ + std::ostringstream oss; + validate_bounds( cd_params.lower_bounds, cd_params.upper_bounds ); + + if ( FloatUtils::isNan( cd_params.step_size ) || cd_params.step_size <= 0 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": step_size > 0 is required, but got step_size=" << cd_params.step_size << "."; + throw std::domain_error( oss.str() ); + } + + if ( FloatUtils::isNan( cd_params.epsilon ) || cd_params.epsilon <= 0 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": epsilon > 0 is required, but got epsilon=" << cd_params.epsilon << "."; + throw std::domain_error( oss.str() ); + } + + if ( FloatUtils::isNan( cd_params.beta1 ) || cd_params.beta1 <= 0 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": beta1 > 0 is required, but got beta1=" << cd_params.beta1 << "."; + throw std::domain_error( oss.str() ); + } + + if ( FloatUtils::isNan( cd_params.beta2 ) || cd_params.beta2 <= 0 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": beta2 > 0 is required, but got beta2=" << cd_params.beta2 << "."; + throw std::domain_error( oss.str() ); + } + + if ( FloatUtils::isNan( cd_params.lr ) || cd_params.lr <= 0 ) 
+ { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": lr > 0 is required, but got lr=" << cd_params.lr << "."; + throw std::domain_error( oss.str() ); + } + + if ( FloatUtils::isNan( cd_params.weight_decay ) || cd_params.weight_decay < 0 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": weight_decay >= 0 is required, but got weight_decay=" << cd_params.weight_decay + << "."; + throw std::domain_error( oss.str() ); + } + + if ( cd_params.max_iterations < 1 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": There must be at least one generation."; + throw std::invalid_argument( oss.str() ); + } + + if ( cd_params.initial_guess ) + { + validate_initial_guess( + *cd_params.initial_guess, cd_params.lower_bounds, cd_params.upper_bounds ); + } +} + +template +ArgumentContainer adam_optimizer( + const Func cost_function, + adam_optimizer_parameters const &cd_params, + URBG &gen, + std::invoke_result_t target_value, + bool *cancellation = nullptr, + std::vector>> + *queries = nullptr, + std::invoke_result_t *current_minimum_cost = nullptr ) +{ + using ResultType = std::invoke_result_t; + + validate_adam_optimizer_parameters( cd_params ); + const size_t dimension = cd_params.lower_bounds.size(); + auto step_size = cd_params.step_size; + auto max_iterations = cd_params.max_iterations; + auto maximize = cd_params.maximize; + auto epsilon = cd_params.epsilon; + auto beta1 = cd_params.beta1; + auto beta2 = cd_params.beta2; + auto lr = cd_params.lr; + auto weight_decay = cd_params.weight_decay; + auto guess = + random_initial_population( cd_params.lower_bounds, cd_params.upper_bounds, 1, gen ); + + if ( cd_params.initial_guess ) + { + guess[0] = *cd_params.initial_guess; + } + + std::vector candidates( dimension ); + std::vector gradient( dimension ); + std::vector first_moment( dimension ); + std::vector second_moment( dimension ); + ArgumentContainer current_minimum = guess[0]; + + for ( size_t j = 0; j < 
dimension; ++j ) + { + first_moment[j] = 0; + second_moment[j] = 0; + } + + for ( size_t i = 0; i < max_iterations; ++i ) + { + for ( size_t j = 0; j < dimension; ++j ) + { + candidates[j] = guess[0]; + candidates[j][j] += step_size; + + size_t sign = ( maximize == false ? 1 : -1 ); + double cost = cost_function( candidates[j] ); + gradient[j] = sign * cost / step_size + weight_decay * guess[0][j]; + + if ( !FloatUtils::isNan( target_value ) && cost <= target_value ) + { + guess[0] = candidates[j]; + break; + } + } + + for ( size_t j = 0; j < dimension; ++j ) + { + first_moment[j] = beta1 * first_moment[j] + ( 1 - beta1 ) * gradient[j]; + first_moment[j] /= ( 1 - std::pow( beta1, step_size ) ); + + second_moment[j] = beta2 * first_moment[j] + ( 1 - beta2 ) * std::pow( gradient[j], 2 ); + second_moment[j] /= ( 1 - std::pow( beta2, step_size ) ); + + guess[0][j] -= lr * first_moment[j] / ( std::sqrt( second_moment[j] ) + epsilon ); + } + } + + return guess[0]; +} + +} // namespace NLR +#endif diff --git a/src/nlr/CoordinateDescent.h b/src/nlr/CoordinateDescent.h index dd9d1c0fd1..83f01ed197 100644 --- a/src/nlr/CoordinateDescent.h +++ b/src/nlr/CoordinateDescent.h @@ -17,260 +17,280 @@ #include #include #include -#include // for std::false_type +#include // for std::false_type #include #include namespace NLR { - template struct has_resize : std::false_type {}; +template struct has_resize : std::false_type +{ +}; - template struct has_resize().resize( size_t{} ) )>> : std::true_type {}; +template +struct has_resize().resize( size_t{} ) )>> : std::true_type +{ +}; - template constexpr bool has_resize_v = has_resize::value; +template constexpr bool has_resize_v = has_resize::value; - template - void validate_bounds( ArgumentContainer const &lower_bounds, - ArgumentContainer const &upper_bounds ) +template +void validate_bounds( ArgumentContainer const &lower_bounds, ArgumentContainer const &upper_bounds ) +{ + std::ostringstream oss; + + if ( lower_bounds.size() == 0 ) 
{ - std::ostringstream oss; - - if ( lower_bounds.size() == 0 ) + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The dimension of the problem cannot be zero."; + throw std::domain_error( oss.str() ); + } + + if ( upper_bounds.size() != lower_bounds.size() ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": There must be the same number of lower bounds as upper bounds, but given "; + oss << upper_bounds.size() << " upper bounds, and " << lower_bounds.size() + << " lower bounds."; + throw std::domain_error( oss.str() ); + } + + for ( size_t i = 0; i < lower_bounds.size(); ++i ) + { + auto lb = lower_bounds[i]; + auto ub = upper_bounds[i]; + if ( lb > ub ) { oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The dimension of the problem cannot be zero."; + oss << ": The upper bound must be greater than or equal to the lower bound, but the " + "upper bound is " + << ub << " and the lower is " << lb << "."; throw std::domain_error( oss.str() ); } - - if ( upper_bounds.size() != lower_bounds.size() ) + + if ( !FloatUtils::isFinite( lb ) ) { oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be the same number of lower bounds as upper bounds, but given "; - oss << upper_bounds.size() << " upper bounds, and " << lower_bounds.size() << " lower bounds."; + oss << ": The lower bound must be finite, but got " << lb << "."; + oss << " For infinite bounds, emulate with std::numeric_limits::lower() or use a " + "standard infinite->finite " + "transform."; throw std::domain_error( oss.str() ); } - - for ( size_t i = 0; i < lower_bounds.size(); ++i ) + + if ( !FloatUtils::isFinite( ub ) ) { - auto lb = lower_bounds[i]; - auto ub = upper_bounds[i]; - if ( lb > ub ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The upper bound must be greater than or equal to the lower bound, but the upper bound is " << ub - << " and the lower is " << lb << "."; - throw std::domain_error( 
oss.str() ); - } - - if ( !FloatUtils::isFinite( lb ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The lower bound must be finite, but got " << lb << "."; - oss << " For infinite bounds, emulate with std::numeric_limits::lower() or use a standard infinite->finite " - "transform."; - throw std::domain_error( oss.str() ); - } - - if ( !FloatUtils::isFinite( ub ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The upper bound must be finite, but got " << ub << "."; - oss << " For infinite bounds, emulate with std::numeric_limits::max() or use a standard infinite->finite " - "transform."; - throw std::domain_error( oss.str() ); - } + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The upper bound must be finite, but got " << ub << "."; + oss << " For infinite bounds, emulate with std::numeric_limits::max() or use a " + "standard infinite->finite " + "transform."; + throw std::domain_error( oss.str() ); } } +} - template - std::vector random_initial_population( ArgumentContainer const &lower_bounds, - ArgumentContainer const &upper_bounds, - size_t initial_population_size, URBG &&gen ) - { - using Real = typename ArgumentContainer::value_type; - using DimensionlessReal = decltype( Real() / Real() ); - constexpr bool has_resize = has_resize_v; - std::vector population( initial_population_size ); - auto const dimension = lower_bounds.size(); - - for ( size_t i = 0; i < population.size(); ++i ) - { - if constexpr ( has_resize ) - population[i].resize( dimension ); - else - { - // Argument type must be known at compile-time; like std::array: - if ( population[i].size() != dimension ) - { - std::ostringstream oss; - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": For containers which do not have resize, the default size must be the same as the dimension, "; - oss << "but the default container size is " << population[i].size() << " and the dimension of the problem is " - << 
dimension << "."; - oss << " The function argument container type is " << typeid( ArgumentContainer ).name() << ".\n"; - throw std::runtime_error( oss.str() ); - } - } - } +template +std::vector random_initial_population( ArgumentContainer const &lower_bounds, + ArgumentContainer const &upper_bounds, + size_t initial_population_size, + URBG &&gen ) +{ + using Real = typename ArgumentContainer::value_type; + using DimensionlessReal = decltype( Real() / Real() ); + constexpr bool has_resize = has_resize_v; + std::vector population( initial_population_size ); + auto const dimension = lower_bounds.size(); - // Why don't we provide an option to initialize with (say) a Gaussian distribution? - // > If the optimum's location is fairly well known, - // > a Gaussian distribution may prove somewhat faster, although it - // > may also increase the probability that the population will converge prematurely. - // > In general, uniform distributions are preferred, since they best reflect - // > the lack of knowledge about the optimum's location. - // - Differential Evolution: A Practical Approach to Global Optimization - // That said, scipy uses Latin Hypercube sampling and says self-avoiding sequences are preferable. - // So this is something that could be investigated and potentially improved. 
- std::uniform_real_distribution dis( DimensionlessReal( 0 ), DimensionlessReal( 1 ) ); - for ( size_t i = 0; i < population.size(); ++i ) + for ( size_t i = 0; i < population.size(); ++i ) + { + if constexpr ( has_resize ) + population[i].resize( dimension ); + else { - for ( size_t j = 0; j < dimension; ++j ) + // Argument type must be known at compile-time; like std::array: + if ( population[i].size() != dimension ) { - auto const &lb = lower_bounds[j]; - auto const &ub = upper_bounds[j]; - population[i][j] = lb + dis( gen ) * ( ub - lb ); + std::ostringstream oss; + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": For containers which do not have resize, the default size must be the " + "same as the dimension, "; + oss << "but the default container size is " << population[i].size() + << " and the dimension of the problem is " << dimension << "."; + oss << " The function argument container type is " + << typeid( ArgumentContainer ).name() << ".\n"; + throw std::runtime_error( oss.str() ); } } - - return population; } - template - void validate_initial_guess( ArgumentContainer const &initial_guess, - ArgumentContainer const &lower_bounds, - ArgumentContainer const &upper_bounds) + // Why don't we provide an option to initialize with (say) a Gaussian distribution? + // > If the optimum's location is fairly well known, + // > a Gaussian distribution may prove somewhat faster, although it + // > may also increase the probability that the population will converge prematurely. + // > In general, uniform distributions are preferred, since they best reflect + // > the lack of knowledge about the optimum's location. + // - Differential Evolution: A Practical Approach to Global Optimization + // That said, scipy uses Latin Hypercube sampling and says self-avoiding sequences are + // preferable. So this is something that could be investigated and potentially improved. 
+ std::uniform_real_distribution dis( DimensionlessReal( 0 ), + DimensionlessReal( 1 ) ); + for ( size_t i = 0; i < population.size(); ++i ) { - std::ostringstream oss; - auto const dimension = lower_bounds.size(); - - if ( initial_guess.size() != dimension ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The initial guess must have the same dimensions as the problem,"; - oss << ", but the problem size is " << dimension << " and the initial guess has " << initial_guess.size() - << " elements."; - throw std::domain_error( oss.str() ); - } - - for ( size_t i = 0; i < dimension; ++i ) + for ( size_t j = 0; j < dimension; ++j ) { - auto lb = lower_bounds[i]; - auto ub = upper_bounds[i]; - - if ( !FloatUtils::isFinite( initial_guess[i] ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": At index " << i << ", the initial guess is " << initial_guess[i] - << ", make sure all elements of the initial guess are finite."; - throw std::domain_error( oss.str() ); - } - - if ( initial_guess[i] < lb || initial_guess[i] > ub ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": At index " << i << " the initial guess " << initial_guess[i] << " is not in the bounds [" << lb << ", " - << ub << "]."; - throw std::domain_error( oss.str() ); - } + auto const &lb = lower_bounds[j]; + auto const &ub = upper_bounds[j]; + population[i][j] = lb + dis( gen ) * ( ub - lb ); } } - // Single-Threaded Coordinate Descent + return population; +} - // We provide the parameters in a struct-there are too many of them and they are too unwieldy to pass individually: - template struct coordinate_descent_parameters +template +void validate_initial_guess( ArgumentContainer const &initial_guess, + ArgumentContainer const &lower_bounds, + ArgumentContainer const &upper_bounds ) +{ + std::ostringstream oss; + auto const dimension = lower_bounds.size(); + + if ( initial_guess.size() != dimension ) { - using Real = typename 
ArgumentContainer::value_type; - ArgumentContainer lower_bounds; - ArgumentContainer upper_bounds; - - Real step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; - size_t max_iterations = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; - ArgumentContainer const *initial_guess = nullptr; - }; - - template - void validate_coordinate_descent_parameters( coordinate_descent_parameters const &cd_params ) + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The initial guess must have the same dimensions as the problem,"; + oss << ", but the problem size is " << dimension << " and the initial guess has " + << initial_guess.size() << " elements."; + throw std::domain_error( oss.str() ); + } + + for ( size_t i = 0; i < dimension; ++i ) { - std::ostringstream oss; - validate_bounds( cd_params.lower_bounds, cd_params.upper_bounds ); - - if ( FloatUtils::isNan( cd_params.step_size ) || cd_params.step_size <= 0 ) + auto lb = lower_bounds[i]; + auto ub = upper_bounds[i]; + + if ( !FloatUtils::isFinite( initial_guess[i] ) ) { oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": step_size > 0 is required, but got step_size=" << cd_params.step_size << "."; + oss << ": At index " << i << ", the initial guess is " << initial_guess[i] + << ", make sure all elements of the initial guess are finite."; throw std::domain_error( oss.str() ); } - - if ( cd_params.max_iterations < 1 ) + + if ( initial_guess[i] < lb || initial_guess[i] > ub ) { oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be at least one generation."; - throw std::invalid_argument( oss.str() ); - } - - if ( cd_params.initial_guess ) - { - validate_initial_guess( *cd_params.initial_guess, cd_params.lower_bounds, cd_params.upper_bounds ); + oss << ": At index " << i << " the initial guess " << initial_guess[i] + << " is not in the bounds [" << lb << ", " << ub << "]."; + throw std::domain_error( oss.str() ); } } +} 
+ +// Single-Threaded Coordinate Descent + +// We provide the parameters in a struct-there are too many of them and they are too unwieldy to +// pass individually: +template struct coordinate_descent_parameters +{ + using Real = typename ArgumentContainer::value_type; + ArgumentContainer lower_bounds; + ArgumentContainer upper_bounds; + + Real step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; + // size_t max_iterations = + // GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; + size_t max_iterations = 2; + ArgumentContainer const *initial_guess = nullptr; +}; + +template +void validate_coordinate_descent_parameters( + coordinate_descent_parameters const &cd_params ) +{ + std::ostringstream oss; + validate_bounds( cd_params.lower_bounds, cd_params.upper_bounds ); + + if ( FloatUtils::isNan( cd_params.step_size ) || cd_params.step_size <= 0 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": step_size > 0 is required, but got step_size=" << cd_params.step_size << "."; + throw std::domain_error( oss.str() ); + } - template - ArgumentContainer coordinate_descent( const Func cost_function, - coordinate_descent_parameters const &cd_params, - URBG &gen, - std::invoke_result_t target_value, - bool *cancellation = nullptr, - std::vector>> *queries = nullptr, - std::invoke_result_t *current_minimum_cost = nullptr) + if ( cd_params.max_iterations < 1 ) { - using ResultType = std::invoke_result_t; - - validate_coordinate_descent_parameters( cd_params ); - const size_t dimension = cd_params.lower_bounds.size(); - const size_t candidates_number = 2 * dimension; - auto step_size = cd_params.step_size; - auto max_iterations = cd_params.max_iterations; - auto guess = random_initial_population( cd_params.lower_bounds, cd_params.upper_bounds, 1, gen ); - - if ( cd_params.initial_guess ) - { - guess[0] = *cd_params.initial_guess; - } - - std::vector candidates( candidates_number ); - std::vector costs( 
candidates_number ); - ArgumentContainer current_minimum = guess[0]; - - for ( size_t i = 0; i < max_iterations; ++i ) + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": There must be at least one generation."; + throw std::invalid_argument( oss.str() ); + } + + if ( cd_params.initial_guess ) + { + validate_initial_guess( + *cd_params.initial_guess, cd_params.lower_bounds, cd_params.upper_bounds ); + } +} + +template +ArgumentContainer coordinate_descent( + const Func cost_function, + coordinate_descent_parameters const &cd_params, + URBG &gen, + std::invoke_result_t target_value, + bool *cancellation = nullptr, + std::vector>> + *queries = nullptr, + std::invoke_result_t *current_minimum_cost = nullptr ) +{ + using ResultType = std::invoke_result_t; + + validate_coordinate_descent_parameters( cd_params ); + const size_t dimension = cd_params.lower_bounds.size(); + const size_t candidates_number = 2 * dimension; + auto step_size = cd_params.step_size; + auto max_iterations = cd_params.max_iterations; + auto guess = + random_initial_population( cd_params.lower_bounds, cd_params.upper_bounds, 1, gen ); + + if ( cd_params.initial_guess ) + { + guess[0] = *cd_params.initial_guess; + } + + std::vector candidates( candidates_number ); + std::vector costs( candidates_number ); + ArgumentContainer current_minimum = guess[0]; + + for ( size_t i = 0; i < max_iterations; ++i ) + { + for ( size_t j = 0; j < candidates_number; ++j ) { - for ( size_t j = 0; j < candidates_number; ++j) + candidates[j] = guess[0]; + size_t index = j / 2; + size_t sign = ( j % 2 == 0 ? 1 : -1 ); + candidates[j][index] += sign * step_size; + + costs[j] = cost_function( candidates[j] ); + + if ( current_minimum_cost && costs[j] < *current_minimum_cost ) { - candidates[j] = guess[0]; - size_t index = j / 2; - size_t sign = ( j % 2 == 0 ? 
1 : -1 ); - candidates[j][index] += sign * step_size; - - costs[j] = cost_function( candidates[j] ); - - if ( current_minimum_cost && costs[j] < *current_minimum_cost ) - { - *current_minimum_cost = costs[j]; - current_minimum = candidates[j]; - } - - if ( !FloatUtils::isNan( target_value ) && costs[j] <= target_value ) - { - break; - } + *current_minimum_cost = costs[j]; + current_minimum = candidates[j]; + } + + if ( !FloatUtils::isNan( target_value ) && costs[j] <= target_value ) + { + break; } - - guess[0] = current_minimum; } - - return guess[0]; + + guess[0] = current_minimum; } + return guess[0]; +} + } // namespace NLR #endif diff --git a/src/nlr/DifferentialEvolution.h b/src/nlr/DifferentialEvolution.h index 030f338a84..eb27c0ef0e 100644 --- a/src/nlr/DifferentialEvolution.h +++ b/src/nlr/DifferentialEvolution.h @@ -18,511 +18,532 @@ #include #include #include -#include // for std::false_type +#include // for std::false_type #include #include namespace NLR { - template struct has_resize : std::false_type {}; +template struct has_resize : std::false_type +{ +}; - template struct has_resize().resize( size_t{} ) )>> : std::true_type {}; +template +struct has_resize().resize( size_t{} ) )>> : std::true_type +{ +}; - template constexpr bool has_resize_v = has_resize::value; +template constexpr bool has_resize_v = has_resize::value; - template - void validate_bounds( ArgumentContainer const &lower_bounds, - ArgumentContainer const &upper_bounds ) +template +void validate_bounds( ArgumentContainer const &lower_bounds, ArgumentContainer const &upper_bounds ) +{ + std::ostringstream oss; + + if ( lower_bounds.size() == 0 ) { - std::ostringstream oss; - - if ( lower_bounds.size() == 0 ) + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The dimension of the problem cannot be zero."; + throw std::domain_error( oss.str() ); + } + + if ( upper_bounds.size() != lower_bounds.size() ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << 
": There must be the same number of lower bounds as upper bounds, but given "; + oss << upper_bounds.size() << " upper bounds, and " << lower_bounds.size() + << " lower bounds."; + throw std::domain_error( oss.str() ); + } + + for ( size_t i = 0; i < lower_bounds.size(); ++i ) + { + auto lb = lower_bounds[i]; + auto ub = upper_bounds[i]; + if ( lb > ub ) { oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The dimension of the problem cannot be zero."; + oss << ": The upper bound must be greater than or equal to the lower bound, but the " + "upper bound is " + << ub << " and the lower is " << lb << "."; throw std::domain_error( oss.str() ); } - - if ( upper_bounds.size() != lower_bounds.size() ) + + if ( !FloatUtils::isFinite( lb ) ) { oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be the same number of lower bounds as upper bounds, but given "; - oss << upper_bounds.size() << " upper bounds, and " << lower_bounds.size() << " lower bounds."; + oss << ": The lower bound must be finite, but got " << lb << "."; + oss << " For infinite bounds, emulate with std::numeric_limits::lower() or use a " + "standard infinite->finite " + "transform."; throw std::domain_error( oss.str() ); } - - for ( size_t i = 0; i < lower_bounds.size(); ++i ) + + if ( !FloatUtils::isFinite( ub ) ) { - auto lb = lower_bounds[i]; - auto ub = upper_bounds[i]; - if ( lb > ub ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The upper bound must be greater than or equal to the lower bound, but the upper bound is " << ub - << " and the lower is " << lb << "."; - throw std::domain_error( oss.str() ); - } - - if ( !FloatUtils::isFinite( lb ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The lower bound must be finite, but got " << lb << "."; - oss << " For infinite bounds, emulate with std::numeric_limits::lower() or use a standard infinite->finite " - "transform."; - throw std::domain_error( oss.str() 
); - } - - if ( !FloatUtils::isFinite( ub ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The upper bound must be finite, but got " << ub << "."; - oss << " For infinite bounds, emulate with std::numeric_limits::max() or use a standard infinite->finite " - "transform."; - throw std::domain_error( oss.str() ); - } + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The upper bound must be finite, but got " << ub << "."; + oss << " For infinite bounds, emulate with std::numeric_limits::max() or use a " + "standard infinite->finite " + "transform."; + throw std::domain_error( oss.str() ); } } +} + +template +std::vector random_initial_population( ArgumentContainer const &lower_bounds, + ArgumentContainer const &upper_bounds, + size_t initial_population_size, + URBG &&gen ) +{ + using Real = typename ArgumentContainer::value_type; + using DimensionlessReal = decltype( Real() / Real() ); + constexpr bool has_resize = has_resize_v; + std::vector population( initial_population_size ); + auto const dimension = lower_bounds.size(); - template - std::vector random_initial_population( ArgumentContainer const &lower_bounds, - ArgumentContainer const &upper_bounds, - size_t initial_population_size, URBG &&gen ) + for ( size_t i = 0; i < population.size(); ++i ) { - using Real = typename ArgumentContainer::value_type; - using DimensionlessReal = decltype( Real() / Real() ); - constexpr bool has_resize = has_resize_v; - std::vector population( initial_population_size ); - auto const dimension = lower_bounds.size(); - - for ( size_t i = 0; i < population.size(); ++i ) + if constexpr ( has_resize ) + population[i].resize( dimension ); + else { - if constexpr ( has_resize ) - population[i].resize( dimension ); - else + // Argument type must be known at compile-time; like std::array: + if ( population[i].size() != dimension ) { - // Argument type must be known at compile-time; like std::array: - if ( population[i].size() != dimension ) - 
{ - std::ostringstream oss; - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": For containers which do not have resize, the default size must be the same as the dimension, "; - oss << "but the default container size is " << population[i].size() << " and the dimension of the problem is " - << dimension << "."; - oss << " The function argument container type is " << typeid( ArgumentContainer ).name() << ".\n"; - throw std::runtime_error( oss.str() ); - } + std::ostringstream oss; + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": For containers which do not have resize, the default size must be the " + "same as the dimension, "; + oss << "but the default container size is " << population[i].size() + << " and the dimension of the problem is " << dimension << "."; + oss << " The function argument container type is " + << typeid( ArgumentContainer ).name() << ".\n"; + throw std::runtime_error( oss.str() ); } } + } - // Why don't we provide an option to initialize with (say) a Gaussian distribution? - // > If the optimum's location is fairly well known, - // > a Gaussian distribution may prove somewhat faster, although it - // > may also increase the probability that the population will converge prematurely. - // > In general, uniform distributions are preferred, since they best reflect - // > the lack of knowledge about the optimum's location. - // - Differential Evolution: A Practical Approach to Global Optimization - // That said, scipy uses Latin Hypercube sampling and says self-avoiding sequences are preferable. - // So this is something that could be investigated and potentially improved. - std::uniform_real_distribution dis( DimensionlessReal( 0 ), DimensionlessReal( 1 ) ); - for ( size_t i = 0; i < population.size(); ++i ) + // Why don't we provide an option to initialize with (say) a Gaussian distribution? 
+ // > If the optimum's location is fairly well known, + // > a Gaussian distribution may prove somewhat faster, although it + // > may also increase the probability that the population will converge prematurely. + // > In general, uniform distributions are preferred, since they best reflect + // > the lack of knowledge about the optimum's location. + // - Differential Evolution: A Practical Approach to Global Optimization + // That said, scipy uses Latin Hypercube sampling and says self-avoiding sequences are + // preferable. So this is something that could be investigated and potentially improved. + std::uniform_real_distribution dis( DimensionlessReal( 0 ), + DimensionlessReal( 1 ) ); + for ( size_t i = 0; i < population.size(); ++i ) + { + for ( size_t j = 0; j < dimension; ++j ) { - for ( size_t j = 0; j < dimension; ++j ) - { - auto const &lb = lower_bounds[j]; - auto const &ub = upper_bounds[j]; - population[i][j] = lb + dis( gen ) * ( ub - lb ); - } + auto const &lb = lower_bounds[j]; + auto const &ub = upper_bounds[j]; + population[i][j] = lb + dis( gen ) * ( ub - lb ); } - - return population; } - template - void validate_initial_guess( ArgumentContainer const &initial_guess, - ArgumentContainer const &lower_bounds, - ArgumentContainer const &upper_bounds) + return population; +} + +template +void validate_initial_guess( ArgumentContainer const &initial_guess, + ArgumentContainer const &lower_bounds, + ArgumentContainer const &upper_bounds ) +{ + std::ostringstream oss; + auto const dimension = lower_bounds.size(); + + if ( initial_guess.size() != dimension ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The initial guess must have the same dimensions as the problem,"; + oss << ", but the problem size is " << dimension << " and the initial guess has " + << initial_guess.size() << " elements."; + throw std::domain_error( oss.str() ); + } + + for ( size_t i = 0; i < dimension; ++i ) { - std::ostringstream oss; - auto const 
dimension = lower_bounds.size(); - - if ( initial_guess.size() != dimension ) + auto lb = lower_bounds[i]; + auto ub = upper_bounds[i]; + + if ( !FloatUtils::isFinite( initial_guess[i] ) ) { oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The initial guess must have the same dimensions as the problem,"; - oss << ", but the problem size is " << dimension << " and the initial guess has " << initial_guess.size() - << " elements."; + oss << ": At index " << i << ", the initial guess is " << initial_guess[i] + << ", make sure all elements of the initial guess are finite."; throw std::domain_error( oss.str() ); } - - for ( size_t i = 0; i < dimension; ++i ) + + if ( initial_guess[i] < lb || initial_guess[i] > ub ) { - auto lb = lower_bounds[i]; - auto ub = upper_bounds[i]; - - if ( !FloatUtils::isFinite( initial_guess[i] ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": At index " << i << ", the initial guess is " << initial_guess[i] - << ", make sure all elements of the initial guess are finite."; - throw std::domain_error( oss.str() ); - } - - if ( initial_guess[i] < lb || initial_guess[i] > ub ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": At index " << i << " the initial guess " << initial_guess[i] << " is not in the bounds [" << lb << ", " - << ub << "]."; - throw std::domain_error( oss.str() ); - } + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": At index " << i << " the initial guess " << initial_guess[i] + << " is not in the bounds [" << lb << ", " << ub << "]."; + throw std::domain_error( oss.str() ); } } +} + +// Return indices corresponding to the minimum function values. +template +std::vector best_indices( std::vector const &function_values ) +{ + const size_t n = function_values.size(); + std::vector indices( n ); - // Return indices corresponding to the minimum function values. 
- template - std::vector best_indices( std::vector const &function_values ) + for ( size_t i = 0; i < n; ++i ) { - const size_t n = function_values.size(); - std::vector indices( n ); - - for ( size_t i = 0; i < n; ++i ) + indices[i] = i; + } + + std::sort( indices.begin(), indices.end(), [&]( size_t a, size_t b ) { + if ( FloatUtils::isNan( function_values[a] ) ) { - indices[i] = i; + return false; } + if ( FloatUtils::isNan( function_values[b] ) ) + { + return true; + } + return function_values[a] < function_values[b]; + } ); + + return indices; +} - std::sort( indices.begin(), - indices.end(), - [&]( size_t a, size_t b ) - { - if ( FloatUtils::isNan( function_values[a] ) ) - { - return false; - } - if ( FloatUtils::isNan( function_values[b] ) ) - { - return true; - } - return function_values[a] < function_values[b]; - }); - - return indices; +template +auto weighted_lehmer_mean( RandomAccessContainer const &values, + RandomAccessContainer const &weights ) +{ + if ( values.size() != weights.size() ) + { + std::ostringstream oss; + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": There must be the same number of weights as values, but got " << values.size() + << " values and " << weights.size() << " weights."; + throw std::logic_error( oss.str() ); } - template - auto weighted_lehmer_mean( RandomAccessContainer const & values, - RandomAccessContainer const & weights) + if ( values.size() == 0 ) { - if ( values.size() != weights.size() ) - { - std::ostringstream oss; - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be the same number of weights as values, but got " << values.size() - << " values and " << weights.size() << " weights."; - throw std::logic_error( oss.str() ); - } - - if ( values.size() == 0 ) + std::ostringstream oss; + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": There must at least one value provided."; + throw std::logic_error( oss.str() ); + } + + using Real = typename 
RandomAccessContainer::value_type; + Real numerator = 0; + Real denominator = 0; + + for ( size_t i = 0; i < values.size(); ++i ) + { + if ( weights[i] < 0 || !FloatUtils::isFinite( weights[i] ) ) { std::ostringstream oss; oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must at least one value provided."; - throw std::logic_error( oss.str() ); + oss << ": All weights must be positive and finite, but got received weight " + << weights[i] << " at index " << i << " of " << weights.size() << "."; + throw std::domain_error( oss.str() ); } - - using Real = typename RandomAccessContainer::value_type; - Real numerator = 0; - Real denominator = 0; - - for ( size_t i = 0; i < values.size(); ++i ) - { - if ( weights[i] < 0 || !FloatUtils::isFinite( weights[i] ) ) + + Real tmp = weights[i] * values[i]; + numerator += tmp * values[i]; + denominator += tmp; + } + return numerator / denominator; +} + +// Storn, R., Price, K. (1997). Differential evolution-a simple and efficient heuristic for global +// optimization over continuous spaces. Journal of global optimization, 11, 341-359. 
See: +// https://www.cp.eng.chula.ac.th/~prabhas//teaching/ec/ec2012/storn_price_de.pdf + +// We provide the parameters in a struct-there are too many of them and they are too unwieldy to +// pass individually: +template struct differential_evolution_parameters +{ + using Real = typename ArgumentContainer::value_type; + using DimensionlessReal = decltype( Real() / Real() ); + ArgumentContainer lower_bounds; + ArgumentContainer upper_bounds; + + // mutation factor is also called scale factor or just F in the literature: + DimensionlessReal mutation_factor = static_cast( 0.65 ); + DimensionlessReal crossover_probability = static_cast( 0.5 ); + + // Population in each generation: + size_t NP = 500; + size_t max_generations = 1000; + ArgumentContainer const *initial_guess = nullptr; + unsigned threads = std::thread::hardware_concurrency(); +}; + +template +void validate_differential_evolution_parameters( + differential_evolution_parameters const &de_params ) +{ + std::ostringstream oss; + validate_bounds( de_params.lower_bounds, de_params.upper_bounds ); + if ( de_params.NP < 4 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The population size must be at least 4, but requested population size of " + << de_params.NP << "."; + throw std::invalid_argument( oss.str() ); + } + + // From: "Differential Evolution: A Practical Approach to Global Optimization (Natural Computing + // Series)" > The scale factor, F in (0,1+), is a positive real number that controls the rate at + // which the population evolves. > While there is no upper limit on F, effective values are + // seldom greater than 1.0. + // ... + // Also see "Limits on F", Section 2.5.1: + // > This discontinuity at F = 1 reduces the number of mutants by half and can result in erratic + // convergence... 
+ auto F = de_params.mutation_factor; + if ( FloatUtils::isNan( F ) || F >= 1 || F <= 0 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": F in (0, 1) is required, but got F=" << F << "."; + throw std::domain_error( oss.str() ); + } + + if ( de_params.max_generations < 1 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": There must be at least one generation."; + throw std::invalid_argument( oss.str() ); + } + + if ( de_params.initial_guess ) + { + validate_initial_guess( + *de_params.initial_guess, de_params.lower_bounds, de_params.upper_bounds ); + } + + if ( de_params.threads == 0 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": There must be at least one thread."; + throw std::invalid_argument( oss.str() ); + } +} + +template +ArgumentContainer differential_evolution( + const Func cost_function, + differential_evolution_parameters const &de_params, + URBG &gen, + std::invoke_result_t target_value = + std::numeric_limits>::quiet_NaN(), + std::atomic *cancellation = nullptr, + std::vector>> + *queries = nullptr, + std::atomic> *current_minimum_cost = nullptr ) +{ + using Real = typename ArgumentContainer::value_type; + using DimensionlessReal = decltype( Real() / Real() ); + using ResultType = std::invoke_result_t; + + validate_differential_evolution_parameters( de_params ); + const size_t dimension = de_params.lower_bounds.size(); + auto NP = de_params.NP; + auto population = + random_initial_population( de_params.lower_bounds, de_params.upper_bounds, NP, gen ); + + if ( de_params.initial_guess ) + { + population[0] = *de_params.initial_guess; + } + + std::vector cost( NP, std::numeric_limits::quiet_NaN() ); + std::atomic target_attained = false; + + // This mutex is only used if the queries are stored: + std::mutex mt; + + std::vector thread_pool; + auto const threads = de_params.threads; + for ( size_t j = 0; j < threads; ++j ) + { + // Note that if some members of the population take 
way longer to compute, + // then this parallelization strategy is very suboptimal. + // However, we tried using std::async (which should be robust to this particular problem), + // but the overhead was just totally unacceptable on ARM Macs (the only platform tested). + // As the economists say "there are no solutions, only tradeoffs". + + thread_pool.emplace_back( [&, j]() { + for ( size_t i = j; i < cost.size(); i += threads ) { - std::ostringstream oss; - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": All weights must be positive and finite, but got received weight " << weights[i] - << " at index " << i << " of " << weights.size() << "."; - throw std::domain_error( oss.str() ); + cost[i] = cost_function( population[i] ); + if ( current_minimum_cost && cost[i] < *current_minimum_cost ) + { + *current_minimum_cost = cost[i]; + } + if ( queries ) + { + std::scoped_lock lock( mt ); + queries->push_back( std::make_pair( population[i], cost[i] ) ); + } + + if ( !FloatUtils::isNan( target_value ) && cost[i] <= target_value ) + { + target_attained = true; + } } - - Real tmp = weights[i] * values[i]; - numerator += tmp * values[i]; - denominator += tmp; - } - return numerator / denominator; + } ); } - // Storn, R., Price, K. (1997). Differential evolution-a simple and efficient heuristic for global optimization over - // continuous spaces. - // Journal of global optimization, 11, 341-359. 
- // See: - // https://www.cp.eng.chula.ac.th/~prabhas//teaching/ec/ec2012/storn_price_de.pdf - - // We provide the parameters in a struct-there are too many of them and they are too unwieldy to pass individually: - template struct differential_evolution_parameters + for ( auto &thread : thread_pool ) { - using Real = typename ArgumentContainer::value_type; - using DimensionlessReal = decltype( Real() / Real() ); - ArgumentContainer lower_bounds; - ArgumentContainer upper_bounds; - - // mutation factor is also called scale factor or just F in the literature: - DimensionlessReal mutation_factor = static_cast( 0.65 ); - DimensionlessReal crossover_probability = static_cast( 0.5 ); - - // Population in each generation: - size_t NP = 500; - size_t max_generations = 1000; - ArgumentContainer const *initial_guess = nullptr; - unsigned threads = std::thread::hardware_concurrency(); - }; - - template - void validate_differential_evolution_parameters( differential_evolution_parameters const &de_params ) + thread.join(); + } + + std::vector trial_vectors( NP ); + for ( size_t i = 0; i < NP; ++i ) { - std::ostringstream oss; - validate_bounds( de_params.lower_bounds, de_params.upper_bounds ); - if ( de_params.NP < 4 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The population size must be at least 4, but requested population size of " << de_params.NP << "."; - throw std::invalid_argument( oss.str() ); - } - - // From: "Differential Evolution: A Practical Approach to Global Optimization (Natural Computing Series)" - // > The scale factor, F in (0,1+), is a positive real number that controls the rate at which the population evolves. - // > While there is no upper limit on F, effective values are seldom greater than 1.0. - // ... - // Also see "Limits on F", Section 2.5.1: - // > This discontinuity at F = 1 reduces the number of mutants by half and can result in erratic convergence... 
- auto F = de_params.mutation_factor; - if ( FloatUtils::isNan( F ) || F >= 1 || F <= 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": F in (0, 1) is required, but got F=" << F << "."; - throw std::domain_error( oss.str() ); - } - - if ( de_params.max_generations < 1 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be at least one generation."; - throw std::invalid_argument( oss.str() ); - } - - if ( de_params.initial_guess ) - { - validate_initial_guess( *de_params.initial_guess, de_params.lower_bounds, de_params.upper_bounds ); - } - - if ( de_params.threads == 0 ) + if constexpr ( has_resize_v ) { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be at least one thread."; - throw std::invalid_argument( oss.str() ); + trial_vectors[i].resize( dimension ); } } - template - ArgumentContainer differential_evolution( const Func cost_function, - differential_evolution_parameters const &de_params, - URBG &gen, - std::invoke_result_t target_value - = std::numeric_limits>::quiet_NaN(), - std::atomic *cancellation = nullptr, - std::vector>> *queries = nullptr, - std::atomic> *current_minimum_cost = nullptr) + std::vector thread_generators( threads ); + for ( size_t j = 0; j < threads; ++j ) + { + thread_generators[j].seed( gen() ); + } + + // std::vector isn't threadsafe! 
+ std::vector updated_indices( NP, 0 ); + + for ( size_t generation = 0; generation < de_params.max_generations; ++generation ) { - using Real = typename ArgumentContainer::value_type; - using DimensionlessReal = decltype( Real() / Real() ); - using ResultType = std::invoke_result_t; - - validate_differential_evolution_parameters( de_params ); - const size_t dimension = de_params.lower_bounds.size(); - auto NP = de_params.NP; - auto population = random_initial_population( de_params.lower_bounds, de_params.upper_bounds, NP, gen ); - - if ( de_params.initial_guess ) + if ( cancellation && *cancellation ) { - population[0] = *de_params.initial_guess; + break; } - - std::vector cost( NP, std::numeric_limits::quiet_NaN() ); - std::atomic target_attained = false; - - // This mutex is only used if the queries are stored: - std::mutex mt; - - std::vector thread_pool; - auto const threads = de_params.threads; + if ( target_attained ) + { + break; + } + + thread_pool.resize( 0 ); for ( size_t j = 0; j < threads; ++j ) { - // Note that if some members of the population take way longer to compute, - // then this parallelization strategy is very suboptimal. - // However, we tried using std::async (which should be robust to this particular problem), - // but the overhead was just totally unacceptable on ARM Macs (the only platform tested). - // As the economists say "there are no solutions, only tradeoffs". 
- - thread_pool.emplace_back( [&, j]() - { - for ( size_t i = j; i < cost.size(); i += threads ) - { - cost[i] = cost_function( population[i] ); - if ( current_minimum_cost && cost[i] < *current_minimum_cost ) - { - *current_minimum_cost = cost[i]; - } - if ( queries ) - { - std::scoped_lock lock( mt ); - queries->push_back( std::make_pair( population[i], cost[i] ) ); - } - - if ( !FloatUtils::isNan( target_value ) && cost[i] <= target_value ) - { - target_attained = true; - } - } - }); + thread_pool.emplace_back( [&, j]() { + auto &tlg = thread_generators[j]; + std::uniform_real_distribution unif01( DimensionlessReal( 0 ), + DimensionlessReal( 1 ) ); + + for ( size_t i = j; i < cost.size(); i += threads ) + { + if ( target_attained ) + { + return; + } + if ( cancellation && *cancellation ) + { + return; + } + + size_t r1, r2, r3; + do + { + r1 = tlg() % NP; + } + while ( r1 == i ); + + do + { + r2 = tlg() % NP; + } + while ( r2 == i || r2 == r1 ); + + do + { + r3 = tlg() % NP; + } + while ( r3 == i || r3 == r2 || r3 == r1 ); + + + for ( size_t k = 0; k < dimension; ++k ) + { + // See equation (4) of the reference: + auto guaranteed_changed_idx = tlg() % dimension; + if ( unif01( tlg ) < de_params.crossover_probability || + k == guaranteed_changed_idx ) + { + auto tmp = + population[r1][k] + de_params.mutation_factor * + ( population[r2][k] - population[r3][k] ); + auto const &lb = de_params.lower_bounds[k]; + auto const &ub = de_params.upper_bounds[k]; + // Some others recommend regenerating the indices rather than clamping; + // I dunno seems like it could get stuck regenerating . . . 
+ trial_vectors[i][k] = std::clamp( tmp, lb, ub ); + } + else + { + trial_vectors[i][k] = population[i][k]; + } + } + + auto const trial_cost = cost_function( trial_vectors[i] ); + if ( FloatUtils::isNan( trial_cost ) ) + { + continue; + } + if ( queries ) + { + std::scoped_lock lock( mt ); + queries->push_back( std::make_pair( trial_vectors[i], trial_cost ) ); + } + + if ( trial_cost < cost[i] || FloatUtils::isNan( cost[i] ) ) + { + cost[i] = trial_cost; + if ( !FloatUtils::isNan( target_value ) && cost[i] <= target_value ) + { + target_attained = true; + } + + if ( current_minimum_cost && cost[i] < *current_minimum_cost ) + { + *current_minimum_cost = cost[i]; + } + + // Can't do this! It's a race condition! + // population[i] = trial_vectors[i]; + // Instead mark all the indices that need to be updated: + updated_indices[i] = 1; + } + } + } ); } - for ( auto &thread : thread_pool ) { thread.join(); } - std::vector trial_vectors( NP ); for ( size_t i = 0; i < NP; ++i ) { - if constexpr ( has_resize_v ) + if ( updated_indices[i] ) { - trial_vectors[i].resize( dimension ); + population[i] = trial_vectors[i]; + updated_indices[i] = 0; } } - - std::vector thread_generators( threads ); - for ( size_t j = 0; j < threads; ++j ) - { - thread_generators[j].seed( gen() ); - } - - // std::vector isn't threadsafe! 
- std::vector updated_indices( NP, 0 ); - - for ( size_t generation = 0; generation < de_params.max_generations; ++generation ) - { - if ( cancellation && *cancellation ) - { - break; - } - if ( target_attained ) - { - break; - } - - thread_pool.resize( 0 ); - for ( size_t j = 0; j < threads; ++j ) - { - thread_pool.emplace_back( [&, j]() - { - auto& tlg = thread_generators[j]; - std::uniform_real_distribution unif01( DimensionlessReal( 0 ), DimensionlessReal( 1 ) ); - - for ( size_t i = j; i < cost.size(); i += threads ) - { - if ( target_attained ) - { - return; - } - if ( cancellation && *cancellation ) - { - return; - } - - size_t r1, r2, r3; - do - { - r1 = tlg() % NP; - } while ( r1 == i ); - - do - { - r2 = tlg() % NP; - } while ( r2 == i || r2 == r1 ); - - do - { - r3 = tlg() % NP; - } while ( r3 == i || r3 == r2 || r3 == r1 ); - - - for ( size_t k = 0; k < dimension; ++k ) - { - // See equation (4) of the reference: - auto guaranteed_changed_idx = tlg() % dimension; - if ( unif01( tlg ) < de_params.crossover_probability || k == guaranteed_changed_idx ) - { - auto tmp = population[r1][k] + de_params.mutation_factor * ( population[r2][k] - population[r3][k] ); - auto const &lb = de_params.lower_bounds[k]; - auto const &ub = de_params.upper_bounds[k]; - // Some others recommend regenerating the indices rather than clamping; - // I dunno seems like it could get stuck regenerating . . . 
- trial_vectors[i][k] = std::clamp( tmp, lb, ub ); - } - else - { - trial_vectors[i][k] = population[i][k]; - } - } - - auto const trial_cost = cost_function( trial_vectors[i] ); - if ( FloatUtils::isNan( trial_cost ) ) - { - continue; - } - if ( queries ) - { - std::scoped_lock lock( mt ); - queries->push_back( std::make_pair( trial_vectors[i], trial_cost ) ); - } - - if ( trial_cost < cost[i] || FloatUtils::isNan( cost[i] ) ) - { - cost[i] = trial_cost; - if ( !FloatUtils::isNan( target_value ) && cost[i] <= target_value ) - { - target_attained = true; - } - - if ( current_minimum_cost && cost[i] < *current_minimum_cost ) - { - *current_minimum_cost = cost[i]; - } - - // Can't do this! It's a race condition! - //population[i] = trial_vectors[i]; - // Instead mark all the indices that need to be updated: - updated_indices[i] = 1; - } - } - }); - } - for ( auto &thread : thread_pool ) - { - thread.join(); - } - - for ( size_t i = 0; i < NP; ++i ) - { - if ( updated_indices[i] ) - { - population[i] = trial_vectors[i]; - updated_indices[i] = 0; - } - } - } - - auto it = std::min_element( cost.begin(), cost.end() ); - return population[std::distance( cost.begin(), it )]; } + auto it = std::min_element( cost.begin(), cost.end() ); + return population[std::distance( cost.begin(), it )]; +} + } // namespace NLR #endif diff --git a/src/nlr/Engine.cpp b/src/nlr/Engine.cpp new file mode 100644 index 0000000000..e137c3cbb9 --- /dev/null +++ b/src/nlr/Engine.cpp @@ -0,0 +1,3813 @@ +/********************* */ +/*! \file Engine.cpp + ** \verbatim + ** Top contributors (to current version): + ** Guy Katz, Duligur Ibeling, Andrew Wu, Omri Isac + ** This file is part of the Marabou project. + ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS + ** in the top-level source directory) and their institutional affiliations. + ** All rights reserved. 
See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** \brief [[ Add one-line brief description here ]] + ** + ** [[ Add lengthier description here ]] + **/ + +#include "Engine.h" + +#include "AutoConstraintMatrixAnalyzer.h" +#include "Debug.h" +#include "DisjunctionConstraint.h" +#include "EngineState.h" +#include "InfeasibleQueryException.h" +#include "MStringf.h" +#include "MalformedBasisException.h" +#include "MarabouError.h" +#include "NLRError.h" +#include "PiecewiseLinearConstraint.h" +#include "Preprocessor.h" +#include "Query.h" +#include "TableauRow.h" +#include "TimeUtils.h" +#include "VariableOutOfBoundDuringOptimizationException.h" +#include "Vector.h" + +#include + +Engine::Engine() + : _context() + , _boundManager( _context ) + , _tableau( _boundManager ) + , _preprocessedQuery( nullptr ) + , _rowBoundTightener( *_tableau ) + , _smtCore( this ) + , _numPlConstraintsDisabledByValidSplits( 0 ) + , _preprocessingEnabled( false ) + , _initialStateStored( false ) + , _work( NULL ) + , _basisRestorationRequired( Engine::RESTORATION_NOT_NEEDED ) + , _basisRestorationPerformed( Engine::NO_RESTORATION_PERFORMED ) + , _costFunctionManager( _tableau ) + , _quitRequested( false ) + , _exitCode( Engine::NOT_DONE ) + , _numVisitedStatesAtPreviousRestoration( 0 ) + , _networkLevelReasoner( NULL ) + , _verbosity( Options::get()->getInt( Options::VERBOSITY ) ) + , _lastNumVisitedStates( 0 ) + , _lastIterationWithProgress( 0 ) + , _symbolicBoundTighteningType( Options::get()->getSymbolicBoundTighteningType() ) + , _solveWithMILP( Options::get()->getBool( Options::SOLVE_WITH_MILP ) ) + , _lpSolverType( Options::get()->getLPSolverType() ) + , _gurobi( nullptr ) + , _milpEncoder( nullptr ) + , _soiManager( nullptr ) + , _simulationSize( Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ) ) + , _isGurobyEnabled( Options::get()->gurobiEnabled() ) + , _performLpTighteningAfterSplit( + Options::get()->getBool( 
Options::PERFORM_LP_TIGHTENING_AFTER_SPLIT ) ) + , _milpSolverBoundTighteningType( Options::get()->getMILPSolverBoundTighteningType() ) + , _sncMode( false ) + , _queryId( "" ) + , _produceUNSATProofs( Options::get()->getBool( Options::PRODUCE_PROOFS ) ) + , _groundBoundManager( _context ) + , _UNSATCertificate( NULL ) +{ + _smtCore.setStatistics( &_statistics ); + _tableau->setStatistics( &_statistics ); + _rowBoundTightener->setStatistics( &_statistics ); + _preprocessor.setStatistics( &_statistics ); + + _activeEntryStrategy = _projectedSteepestEdgeRule; + _activeEntryStrategy->setStatistics( &_statistics ); + _statistics.stampStartingTime(); + setRandomSeed( Options::get()->getInt( Options::SEED ) ); + + _boundManager.registerEngine( this ); + _groundBoundManager.registerEngine( this ); + _statisticsPrintingFrequency = ( _lpSolverType == LPSolverType::NATIVE ) + ? GlobalConfiguration::STATISTICS_PRINTING_FREQUENCY + : GlobalConfiguration::STATISTICS_PRINTING_FREQUENCY_GUROBI; + + _UNSATCertificateCurrentPointer = + _produceUNSATProofs ? 
new ( true ) + CVC4::context::CDO( &_context, NULL ) + : NULL; +} + +Engine::~Engine() +{ + if ( _work ) + { + delete[] _work; + _work = NULL; + } + + if ( _UNSATCertificate ) + { + delete _UNSATCertificate; + _UNSATCertificate = NULL; + } + + if ( _produceUNSATProofs && _UNSATCertificateCurrentPointer ) + _UNSATCertificateCurrentPointer->deleteSelf(); +} + +void Engine::setVerbosity( unsigned verbosity ) +{ + _verbosity = verbosity; +} + +void Engine::adjustWorkMemorySize() +{ + if ( _work ) + { + delete[] _work; + _work = NULL; + } + + _work = new double[_tableau->getM()]; + if ( !_work ) + throw MarabouError( MarabouError::ALLOCATION_FAILED, "Engine::work" ); +} + +void Engine::applySnCSplit( PiecewiseLinearCaseSplit sncSplit, String queryId ) +{ + _sncMode = true; + _sncSplit = sncSplit; + _queryId = queryId; + preContextPushHook(); + _smtCore.pushContext(); + applySplit( sncSplit ); + _boundManager.propagateTightenings(); +} + +bool Engine::inSnCMode() const +{ + return _sncMode; +} + +void Engine::setRandomSeed( unsigned seed ) +{ + srand( seed ); +} + +Query Engine::prepareSnCQuery() +{ + List bounds = _sncSplit.getBoundTightenings(); + List equations = _sncSplit.getEquations(); + + Query sncIPQ = *_preprocessedQuery; + for ( auto &equation : equations ) + sncIPQ.addEquation( equation ); + + for ( auto &bound : bounds ) + { + switch ( bound._type ) + { + case Tightening::LB: + sncIPQ.setLowerBound( bound._variable, bound._value ); + break; + + case Tightening::UB: + sncIPQ.setUpperBound( bound._variable, bound._value ); + } + } + + return sncIPQ; +} + +void Engine::exportQueryWithError( String errorMessage ) +{ + String ipqFileName = ( _queryId.length() > 0 ) ? _queryId + ".ipq" : "failedInputQuery.ipq"; + prepareSnCQuery().saveQuery( ipqFileName ); + printf( "Engine: %s!\nInput query has been saved as %s. 
Please attach the input query when you " + "open the issue on GitHub.\n", + errorMessage.ascii(), + ipqFileName.ascii() ); +} + +bool Engine::solve( double timeoutInSeconds ) +{ + SignalHandler::getInstance()->initialize(); + SignalHandler::getInstance()->registerClient( this ); + + // Register the boundManager with all the PL constraints + for ( auto &plConstraint : _plConstraints ) + plConstraint->registerBoundManager( &_boundManager ); + for ( auto &nlConstraint : _nlConstraints ) + nlConstraint->registerBoundManager( &_boundManager ); + + // Before encoding, make sure all valid constraints are applied. + applyAllValidConstraintCaseSplits(); + + if ( _solveWithMILP ) + return solveWithMILPEncoding( timeoutInSeconds ); + + updateDirections(); + if ( _lpSolverType == LPSolverType::NATIVE ) + storeInitialEngineState(); + else if ( _lpSolverType == LPSolverType::GUROBI ) + { + ENGINE_LOG( "Encoding convex relaxation into Gurobi..." ); + _gurobi = std::unique_ptr( new GurobiWrapper() ); + _tableau->setGurobi( &( *_gurobi ) ); + _milpEncoder = std::unique_ptr( new MILPEncoder( *_tableau ) ); + _milpEncoder->setStatistics( &_statistics ); + _milpEncoder->encodeQuery( *_gurobi, *_preprocessedQuery, true ); + ENGINE_LOG( "Encoding convex relaxation into Gurobi - done" ); + } + + mainLoopStatistics(); + if ( _verbosity > 0 ) + { + printf( "\nEngine::solve: Initial statistics\n" ); + _statistics.print(); + printf( "\n---\n" ); + } + + bool splitJustPerformed = true; + struct timespec mainLoopStart = TimeUtils::sampleMicro(); + while ( true ) + { + struct timespec mainLoopEnd = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TIME_MAIN_LOOP_MICRO, + TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); + mainLoopStart = mainLoopEnd; + + if ( shouldExitDueToTimeout( timeoutInSeconds ) ) + { + if ( _verbosity > 0 ) + { + printf( "\n\nEngine: quitting due to timeout...\n\n" ); + printf( "Final statistics:\n" ); + _statistics.print(); + } + + _exitCode = 
Engine::TIMEOUT; + _statistics.timeout(); + return false; + } + + if ( _quitRequested ) + { + if ( _verbosity > 0 ) + { + printf( "\n\nEngine: quitting due to external request...\n\n" ); + printf( "Final statistics:\n" ); + _statistics.print(); + } + + _exitCode = Engine::QUIT_REQUESTED; + return false; + } + + try + { + DEBUG( _tableau->verifyInvariants() ); + + mainLoopStatistics(); + if ( _verbosity > 1 && + _statistics.getLongAttribute( Statistics::NUM_MAIN_LOOP_ITERATIONS ) % + _statisticsPrintingFrequency == + 0 ) + _statistics.print(); + + if ( _lpSolverType == LPSolverType::NATIVE ) + { + checkOverallProgress(); + // Check whether progress has been made recently + + if ( performPrecisionRestorationIfNeeded() ) + continue; + + if ( _tableau->basisMatrixAvailable() ) + { + explicitBasisBoundTightening(); + _boundManager.propagateTightenings(); + applyAllValidConstraintCaseSplits(); + } + } + + // If true, we just entered a new subproblem + if ( splitJustPerformed ) + { + performBoundTighteningAfterCaseSplit(); + informLPSolverOfBounds(); + splitJustPerformed = false; + } + + // Perform any SmtCore-initiated case splits + if ( _smtCore.needToSplit() ) + { + _smtCore.performSplit(); + splitJustPerformed = true; + continue; + } + + if ( !_tableau->allBoundsValid() ) + { + // Some variable bounds are invalid, so the query is unsat + throw InfeasibleQueryException(); + } + + if ( allVarsWithinBounds() ) + { + // It's possible that a disjunction constraint is fixed and additional constraints + // are introduced, making the linear portion unsatisfied. So we need to make sure + // there are no valid case splits that we do not know of. + applyAllBoundTightenings(); + if ( applyAllValidConstraintCaseSplits() ) + continue; + + // The linear portion of the problem has been solved. 
+ // Check the status of the PL constraints + bool solutionFound = adjustAssignmentToSatisfyNonLinearConstraints(); + if ( solutionFound ) + { + if ( allNonlinearConstraintsHold() ) + { + mainLoopEnd = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( + Statistics::TIME_MAIN_LOOP_MICRO, + TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); + if ( _verbosity > 0 ) + { + printf( "\nEngine::solve: sat assignment found\n" ); + _statistics.print(); + } + + // Allows checking proofs produced for UNSAT leaves of satisfiable query + // search tree + if ( _produceUNSATProofs ) + { + ASSERT( _UNSATCertificateCurrentPointer ); + ( **_UNSATCertificateCurrentPointer ).setSATSolutionFlag(); + } + _exitCode = Engine::SAT; + return true; + } + else if ( !hasBranchingCandidate() ) + { + mainLoopEnd = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( + Statistics::TIME_MAIN_LOOP_MICRO, + TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); + if ( _verbosity > 0 ) + { + printf( "\nEngine::solve: at leaf node but solving inconclusive\n" ); + _statistics.print(); + } + _exitCode = Engine::UNKNOWN; + return false; + } + else + { + while ( !_smtCore.needToSplit() ) + _smtCore.reportRejectedPhasePatternProposal(); + continue; + } + } + else + { + continue; + } + } + + // We have out-of-bounds variables. + if ( _lpSolverType == LPSolverType::NATIVE ) + performSimplexStep(); + else + { + ENGINE_LOG( "Checking LP feasibility with Gurobi..." 
); + DEBUG( { checkGurobiBoundConsistency(); } ); + ASSERT( _lpSolverType == LPSolverType::GUROBI ); + LinearExpression dontCare; + minimizeCostWithGurobi( dontCare ); + } + continue; + } + catch ( const MalformedBasisException & ) + { + _tableau->toggleOptimization( false ); + if ( !handleMalformedBasisException() ) + { + ASSERT( _lpSolverType == LPSolverType::NATIVE ); + _exitCode = Engine::ERROR; + exportQueryWithError( "Cannot restore tableau" ); + mainLoopEnd = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TIME_MAIN_LOOP_MICRO, + TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); + return false; + } + } + catch ( const InfeasibleQueryException & ) + { + _tableau->toggleOptimization( false ); + // The current query is unsat, and we need to pop. + // If we're at level 0, the whole query is unsat. + if ( _produceUNSATProofs ) + explainSimplexFailure(); + + if ( !_smtCore.popSplit() ) + { + mainLoopEnd = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TIME_MAIN_LOOP_MICRO, + TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); + if ( _verbosity > 0 ) + { + printf( "\nEngine::solve: unsat query\n" ); + _statistics.print(); + } + _exitCode = Engine::UNSAT; + return false; + } + else + { + splitJustPerformed = true; + } + } + catch ( const VariableOutOfBoundDuringOptimizationException & ) + { + _tableau->toggleOptimization( false ); + continue; + } + catch ( MarabouError &e ) + { + String message = Stringf( + "Caught a MarabouError. Code: %u. Message: %s ", e.getCode(), e.getUserMessage() ); + _exitCode = Engine::ERROR; + exportQueryWithError( message ); + mainLoopEnd = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TIME_MAIN_LOOP_MICRO, + TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); + return false; + } + catch ( ... 
) + { + _exitCode = Engine::ERROR; + exportQueryWithError( "Unknown error" ); + mainLoopEnd = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TIME_MAIN_LOOP_MICRO, + TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); + return false; + } + } +} + +void Engine::mainLoopStatistics() +{ + struct timespec start = TimeUtils::sampleMicro(); + + unsigned activeConstraints = 0; + for ( const auto &constraint : _plConstraints ) + if ( constraint->isActive() ) + ++activeConstraints; + + _statistics.setUnsignedAttribute( Statistics::NUM_ACTIVE_PL_CONSTRAINTS, activeConstraints ); + _statistics.setUnsignedAttribute( Statistics::NUM_PL_VALID_SPLITS, + _numPlConstraintsDisabledByValidSplits ); + _statistics.setUnsignedAttribute( Statistics::NUM_PL_SMT_ORIGINATED_SPLITS, + _plConstraints.size() - activeConstraints - + _numPlConstraintsDisabledByValidSplits ); + + _statistics.incLongAttribute( Statistics::NUM_MAIN_LOOP_ITERATIONS ); + + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TOTAL_TIME_HANDLING_STATISTICS_MICRO, + TimeUtils::timePassed( start, end ) ); +} + +void Engine::performBoundTighteningAfterCaseSplit() +{ + // Tighten bounds of a first hidden layer with MILP solver + performMILPSolverBoundedTighteningForSingleLayer( 1 ); + do + { + performSymbolicBoundTightening(); + } + while ( applyAllValidConstraintCaseSplits() ); + + // Tighten bounds of an output layer with MILP solver + if ( _networkLevelReasoner ) // to avoid failing of system test. + performMILPSolverBoundedTighteningForSingleLayer( + _networkLevelReasoner->getLayerIndexToLayer().size() - 1 ); +} + +bool Engine::adjustAssignmentToSatisfyNonLinearConstraints() +{ + ENGINE_LOG( "Linear constraints satisfied. Now trying to satisfy non-linear" + " constraints..." 
); + collectViolatedPlConstraints(); + + // If all constraints are satisfied, we are possibly done + if ( allPlConstraintsHold() ) + { + if ( _lpSolverType == LPSolverType::NATIVE && + _tableau->getBasicAssignmentStatus() != ITableau::BASIC_ASSIGNMENT_JUST_COMPUTED ) + { + if ( _verbosity > 0 ) + { + printf( "Before declaring sat, recomputing...\n" ); + } + // Make sure that the assignment is precise before declaring success + _tableau->computeAssignment(); + // If we actually have a real satisfying assignment, + return false; + } + else + return true; + } + else if ( !GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH ) + { + // We have violated piecewise-linear constraints. + performConstraintFixingStep(); + + // Finally, take this opporunity to tighten any bounds + // and perform any valid case splits. + tightenBoundsOnConstraintMatrix(); + _boundManager.propagateTightenings(); + // For debugging purposes + checkBoundCompliancyWithDebugSolution(); + + while ( applyAllValidConstraintCaseSplits() ) + performSymbolicBoundTightening(); + return false; + } + else + { + return performDeepSoILocalSearch(); + } +} + +bool Engine::performPrecisionRestorationIfNeeded() +{ + // If the basis has become malformed, we need to restore it + if ( basisRestorationNeeded() ) + { + if ( _basisRestorationRequired == Engine::STRONG_RESTORATION_NEEDED ) + { + performPrecisionRestoration( PrecisionRestorer::RESTORE_BASICS ); + _basisRestorationPerformed = Engine::PERFORMED_STRONG_RESTORATION; + } + else + { + performPrecisionRestoration( PrecisionRestorer::DO_NOT_RESTORE_BASICS ); + _basisRestorationPerformed = Engine::PERFORMED_WEAK_RESTORATION; + } + + _numVisitedStatesAtPreviousRestoration = + _statistics.getUnsignedAttribute( Statistics::NUM_VISITED_TREE_STATES ); + _basisRestorationRequired = Engine::RESTORATION_NOT_NEEDED; + return true; + } + + // Restoration is not required + _basisRestorationPerformed = Engine::NO_RESTORATION_PERFORMED; + + // Possible restoration due to preceision 
degradation + if ( shouldCheckDegradation() && highDegradation() ) + { + performPrecisionRestoration( PrecisionRestorer::RESTORE_BASICS ); + return true; + } + + return false; +} + +bool Engine::handleMalformedBasisException() +{ + // Debug + printf( "MalformedBasisException caught!\n" ); + // + + if ( _basisRestorationPerformed == Engine::NO_RESTORATION_PERFORMED ) + { + if ( _numVisitedStatesAtPreviousRestoration != + _statistics.getUnsignedAttribute( Statistics::NUM_VISITED_TREE_STATES ) ) + { + // We've tried a strong restoration before, and it didn't work. Do a weak restoration + _basisRestorationRequired = Engine::WEAK_RESTORATION_NEEDED; + } + else + { + _basisRestorationRequired = Engine::STRONG_RESTORATION_NEEDED; + } + return true; + } + else if ( _basisRestorationPerformed == Engine::PERFORMED_STRONG_RESTORATION ) + { + _basisRestorationRequired = Engine::WEAK_RESTORATION_NEEDED; + return true; + } + else + return false; +} + +void Engine::performConstraintFixingStep() +{ + // Statistics + _statistics.incLongAttribute( Statistics::NUM_CONSTRAINT_FIXING_STEPS ); + struct timespec start = TimeUtils::sampleMicro(); + + // Select a violated constraint as the target + selectViolatedPlConstraint(); + + // Report the violated constraint to the SMT engine + reportPlViolation(); + + // Attempt to fix the constraint + fixViolatedPlConstraintIfPossible(); + + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TIME_CONSTRAINT_FIXING_STEPS_MICRO, + TimeUtils::timePassed( start, end ) ); +} + +bool Engine::performSimplexStep() +{ + // Statistics + _statistics.incLongAttribute( Statistics::NUM_SIMPLEX_STEPS ); + struct timespec start = TimeUtils::sampleMicro(); + + /* + In order to increase numerical stability, we attempt to pick a + "good" entering/leaving combination, by trying to avoid tiny pivot + values. We do this as follows: + + 1. Pick an entering variable according to the strategy in use. + 2. 
Find the entailed leaving variable. + 3. If the combination is bad, go back to (1) and find the + next-best entering variable. + */ + + if ( _tableau->isOptimizing() ) + _costFunctionManager->computeGivenCostFunction( _heuristicCost._addends ); + if ( _costFunctionManager->costFunctionInvalid() ) + _costFunctionManager->computeCoreCostFunction(); + else + _costFunctionManager->adjustBasicCostAccuracy(); + + DEBUG( { + // Since we're performing a simplex step, there are out-of-bounds variables. + // Therefore, if the cost function is fresh, it should not be zero. + if ( _costFunctionManager->costFunctionJustComputed() ) + { + const double *costFunction = _costFunctionManager->getCostFunction(); + unsigned size = _tableau->getN() - _tableau->getM(); + bool found = false; + for ( unsigned i = 0; i < size; ++i ) + { + if ( !FloatUtils::isZero( costFunction[i] ) ) + { + found = true; + break; + } + } + + if ( !found ) + { + printf( "Error! Have OOB vars but cost function is zero.\n" + "Recomputing cost function. New one is:\n" ); + _costFunctionManager->computeCoreCostFunction(); + _costFunctionManager->dumpCostFunction(); + throw MarabouError( MarabouError::DEBUGGING_ERROR, + "Have OOB vars but cost function is zero" ); + } + } + } ); + + // Obtain all eligible entering variables + List enteringVariableCandidates; + _tableau->getEntryCandidates( enteringVariableCandidates ); + + unsigned bestLeaving = 0; + double bestChangeRatio = 0.0; + Set excludedEnteringVariables; + bool haveCandidate = false; + unsigned bestEntering = 0; + double bestPivotEntry = 0.0; + unsigned tries = GlobalConfiguration::MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS; + + while ( tries > 0 ) + { + --tries; + + // Attempt to pick the best entering variable from the available candidates + if ( !_activeEntryStrategy->select( + _tableau, enteringVariableCandidates, excludedEnteringVariables ) ) + { + // No additional candidates can be found. + break; + } + + // We have a candidate! 
+ haveCandidate = true; + + // We don't want to re-consider this candidate in future + // iterations + excludedEnteringVariables.insert( _tableau->getEnteringVariableIndex() ); + + // Pick a leaving variable + _tableau->computeChangeColumn(); + _tableau->pickLeavingVariable(); + + // A fake pivot always wins + if ( _tableau->performingFakePivot() ) + { + bestEntering = _tableau->getEnteringVariableIndex(); + bestLeaving = _tableau->getLeavingVariableIndex(); + bestChangeRatio = _tableau->getChangeRatio(); + memcpy( _work, _tableau->getChangeColumn(), sizeof( double ) * _tableau->getM() ); + break; + } + + // Is the newly found pivot better than the stored one? + unsigned leavingIndex = _tableau->getLeavingVariableIndex(); + double pivotEntry = FloatUtils::abs( _tableau->getChangeColumn()[leavingIndex] ); + if ( pivotEntry > bestPivotEntry ) + { + bestEntering = _tableau->getEnteringVariableIndex(); + bestPivotEntry = pivotEntry; + bestLeaving = leavingIndex; + bestChangeRatio = _tableau->getChangeRatio(); + memcpy( _work, _tableau->getChangeColumn(), sizeof( double ) * _tableau->getM() ); + } + + // If the pivot is greater than the sought-after threshold, we + // are done. + if ( bestPivotEntry >= GlobalConfiguration::ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD ) + break; + else + _statistics.incLongAttribute( + Statistics::NUM_SIMPLEX_PIVOT_SELECTIONS_IGNORED_FOR_STABILITY ); + } + + // If we don't have any candidates, this simplex step has failed. + if ( !haveCandidate ) + { + if ( _tableau->getBasicAssignmentStatus() != ITableau::BASIC_ASSIGNMENT_JUST_COMPUTED ) + { + // This failure might have resulted from a corrupt basic assignment. 
+ _tableau->computeAssignment(); + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TIME_SIMPLEX_STEPS_MICRO, + TimeUtils::timePassed( start, end ) ); + return false; + } + else if ( !_costFunctionManager->costFunctionJustComputed() ) + { + // This failure might have resulted from a corrupt cost function. + ASSERT( _costFunctionManager->getCostFunctionStatus() == + ICostFunctionManager::COST_FUNCTION_UPDATED ); + _costFunctionManager->invalidateCostFunction(); + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TIME_SIMPLEX_STEPS_MICRO, + TimeUtils::timePassed( start, end ) ); + return false; + } + else + { + // Cost function is fresh --- failure is real. + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TIME_SIMPLEX_STEPS_MICRO, + TimeUtils::timePassed( start, end ) ); + if ( _tableau->isOptimizing() ) + { + // The current solution is optimal. + return true; + } + else + throw InfeasibleQueryException(); + } + } + + // Set the best choice in the tableau + _tableau->setEnteringVariableIndex( bestEntering ); + _tableau->setLeavingVariableIndex( bestLeaving ); + _tableau->setChangeColumn( _work ); + _tableau->setChangeRatio( bestChangeRatio ); + + bool fakePivot = _tableau->performingFakePivot(); + + if ( !fakePivot && bestPivotEntry < GlobalConfiguration::ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD ) + { + /* + Despite our efforts, we are stuck with a small pivot. 
If basis factorization + isn't fresh, refresh it and terminate this step - perhaps in the next iteration + a better pivot will be found + */ + if ( !_tableau->basisMatrixAvailable() ) + { + _tableau->refreshBasisFactorization(); + return false; + } + + _statistics.incLongAttribute( Statistics::NUM_SIMPLEX_UNSTABLE_PIVOTS ); + } + + if ( !fakePivot ) + { + _tableau->computePivotRow(); + _rowBoundTightener->examinePivotRow(); + } + + // Perform the actual pivot + _activeEntryStrategy->prePivotHook( _tableau, fakePivot ); + _tableau->performPivot(); + _activeEntryStrategy->postPivotHook( _tableau, fakePivot ); + _boundManager.propagateTightenings(); + _costFunctionManager->invalidateCostFunction(); + + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TIME_SIMPLEX_STEPS_MICRO, + TimeUtils::timePassed( start, end ) ); + return false; +} + +void Engine::fixViolatedPlConstraintIfPossible() +{ + List fixes; + + if ( GlobalConfiguration::USE_SMART_FIX ) + fixes = _plConstraintToFix->getSmartFixes( _tableau ); + else + fixes = _plConstraintToFix->getPossibleFixes(); + + // First, see if we can fix without pivoting. We are looking for a fix concerning a + // non-basic variable, that doesn't set that variable out-of-bounds. + for ( const auto &fix : fixes ) + { + if ( !_tableau->isBasic( fix._variable ) ) + { + if ( _tableau->checkValueWithinBounds( fix._variable, fix._value ) ) + { + _tableau->setNonBasicAssignment( fix._variable, fix._value, true ); + return; + } + } + } + + // No choice, have to pivot. Look for a fix concerning a basic variable, that + // doesn't set that variable out-of-bounds. If smart-fix is enabled and implemented, + // we should probably not reach this point. 
+ bool found = false; + auto it = fixes.begin(); + while ( !found && it != fixes.end() ) + { + if ( _tableau->isBasic( it->_variable ) ) + { + if ( _tableau->checkValueWithinBounds( it->_variable, it->_value ) ) + { + found = true; + } + } + if ( !found ) + { + ++it; + } + } + + // If we couldn't find an eligible fix, give up + if ( !found ) + return; + + PiecewiseLinearConstraint::Fix fix = *it; + ASSERT( _tableau->isBasic( fix._variable ) ); + + TableauRow row( _tableau->getN() - _tableau->getM() ); + _tableau->getTableauRow( _tableau->variableToIndex( fix._variable ), &row ); + + // Pick the variable with the largest coefficient in this row for pivoting, + // to increase numerical stability. + unsigned bestCandidate = row._row[0]._var; + double bestValue = FloatUtils::abs( row._row[0]._coefficient ); + + unsigned n = _tableau->getN(); + unsigned m = _tableau->getM(); + for ( unsigned i = 1; i < n - m; ++i ) + { + double contenderValue = FloatUtils::abs( row._row[i]._coefficient ); + if ( FloatUtils::gt( contenderValue, bestValue ) ) + { + bestValue = contenderValue; + bestCandidate = row._row[i]._var; + } + } + + if ( FloatUtils::isZero( bestValue ) ) + { + // This can happen, e.g., if we have an equation x = 5, and is legal behavior. + return; + } + + // Switch between nonBasic and the variable we need to fix + _tableau->setEnteringVariableIndex( _tableau->variableToIndex( bestCandidate ) ); + _tableau->setLeavingVariableIndex( _tableau->variableToIndex( fix._variable ) ); + + // Make sure the change column and pivot row are up-to-date - strategies + // such as projected steepest edge need these for their internal updates. 
+ _tableau->computeChangeColumn(); + _tableau->computePivotRow(); + + _activeEntryStrategy->prePivotHook( _tableau, false ); + _tableau->performDegeneratePivot(); + _activeEntryStrategy->postPivotHook( _tableau, false ); + + ASSERT( !_tableau->isBasic( fix._variable ) ); + _tableau->setNonBasicAssignment( fix._variable, fix._value, true ); +} + +bool Engine::processInputQuery( const IQuery &inputQuery ) +{ + return processInputQuery( inputQuery, GlobalConfiguration::PREPROCESS_INPUT_QUERY ); +} + +bool Engine::calculateBounds( const IQuery &inputQuery ) +{ + ENGINE_LOG( "calculateBounds starting\n" ); + struct timespec start = TimeUtils::sampleMicro(); + + try + { + invokePreprocessor( inputQuery, true ); + if ( _verbosity > 1 ) + printInputBounds( inputQuery ); + + initializeNetworkLevelReasoning(); + + performSymbolicBoundTightening( &( *_preprocessedQuery ) ); + performSimulation(); + performMILPSolverBoundedTightening( &( *_preprocessedQuery ) ); + performAdditionalBackwardAnalysisIfNeeded(); + + if ( _networkLevelReasoner && Options::get()->getBool( Options::DUMP_BOUNDS ) ) + _networkLevelReasoner->dumpBounds(); + + struct timespec end = TimeUtils::sampleMicro(); + _statistics.setLongAttribute( Statistics::CALCULATE_BOUNDS_TIME_MICRO, + TimeUtils::timePassed( start, end ) ); + if ( !_tableau->allBoundsValid() ) + { + // Some variable bounds are invalid, so the query is unsat + throw InfeasibleQueryException(); + } + } + catch ( const InfeasibleQueryException & ) + { + ENGINE_LOG( "calculateBounds done\n" ); + + struct timespec end = TimeUtils::sampleMicro(); + _statistics.setLongAttribute( Statistics::CALCULATE_BOUNDS_TIME_MICRO, + TimeUtils::timePassed( start, end ) ); + + _exitCode = Engine::UNSAT; + printf( "unsat\n" ); + + return false; + } + + ENGINE_LOG( "calculateBounds done\n" ); + + return true; +} + +void Engine::invokePreprocessor( const IQuery &inputQuery, bool preprocess ) +{ + if ( _verbosity > 0 ) + printf( "Engine::processInputQuery: Input 
query (before preprocessing): " + "%u equations, %u variables\n", + inputQuery.getNumberOfEquations(), + inputQuery.getNumberOfVariables() ); + + // If processing is enabled, invoke the preprocessor + _preprocessingEnabled = preprocess; + if ( _preprocessingEnabled ) + _preprocessedQuery = _preprocessor.preprocess( + inputQuery, GlobalConfiguration::PREPROCESSOR_ELIMINATE_VARIABLES ); + else + { + _preprocessedQuery = std::unique_ptr( inputQuery.generateQuery() ); + Preprocessor().informConstraintsOfInitialBounds( *_preprocessedQuery ); + } + + if ( _verbosity > 0 ) + printf( "Engine::processInputQuery: Input query (after preprocessing): " + "%u equations, %u variables\n\n", + _preprocessedQuery->getNumberOfEquations(), + _preprocessedQuery->getNumberOfVariables() ); + + unsigned infiniteBounds = _preprocessedQuery->countInfiniteBounds(); + if ( infiniteBounds != 0 ) + { + _exitCode = Engine::ERROR; + throw MarabouError( MarabouError::UNBOUNDED_VARIABLES_NOT_YET_SUPPORTED, + Stringf( "Error! Have %u infinite bounds", infiniteBounds ).ascii() ); + } +} + +void Engine::printInputBounds( const IQuery &inputQuery ) const +{ + printf( "Input bounds:\n" ); + for ( unsigned i = 0; i < inputQuery.getNumInputVariables(); ++i ) + { + unsigned variable = inputQuery.inputVariableByIndex( i ); + double lb, ub; + bool fixed = false; + if ( _preprocessingEnabled ) + { + // Fixed variables are easy: return the value they've been fixed to. + if ( _preprocessor.variableIsFixed( variable ) ) + { + fixed = true; + lb = _preprocessor.getFixedValue( variable ); + ub = lb; + } + else + { + // Has the variable been merged into another? 
+ while ( _preprocessor.variableIsMerged( variable ) ) + variable = _preprocessor.getMergedIndex( variable ); + + // We know which variable to look for, but it may have been assigned + // a new index, due to variable elimination + variable = _preprocessor.getNewIndex( variable ); + + lb = _preprocessedQuery->getLowerBound( variable ); + ub = _preprocessedQuery->getUpperBound( variable ); + } + } + else + { + lb = inputQuery.getLowerBound( variable ); + ub = inputQuery.getUpperBound( variable ); + } + + printf( "\tx%u: [%8.4lf, %8.4lf] %s\n", i, lb, ub, fixed ? "[FIXED]" : "" ); + } + printf( "\n" ); +} + +void Engine::storeEquationsInDegradationChecker() +{ + _degradationChecker.storeEquations( *_preprocessedQuery ); +} + +double *Engine::createConstraintMatrix() +{ + const List &equations( _preprocessedQuery->getEquations() ); + unsigned m = equations.size(); + unsigned n = _preprocessedQuery->getNumberOfVariables(); + + // Step 1: create a constraint matrix from the equations + double *constraintMatrix = new double[n * m]; + if ( !constraintMatrix ) + throw MarabouError( MarabouError::ALLOCATION_FAILED, "Engine::constraintMatrix" ); + std::fill_n( constraintMatrix, n * m, 0.0 ); + + unsigned equationIndex = 0; + for ( const auto &equation : equations ) + { + if ( equation._type != Equation::EQ ) + { + _exitCode = Engine::ERROR; + throw MarabouError( MarabouError::NON_EQUALITY_INPUT_EQUATION_DISCOVERED ); + } + + for ( const auto &addend : equation._addends ) + constraintMatrix[equationIndex * n + addend._variable] = addend._coefficient; + + ++equationIndex; + } + + return constraintMatrix; +} + +void Engine::removeRedundantEquations( const double *constraintMatrix ) +{ + const List &equations( _preprocessedQuery->getEquations() ); + unsigned m = equations.size(); + unsigned n = _preprocessedQuery->getNumberOfVariables(); + + // Step 1: analyze the matrix to identify redundant rows + AutoConstraintMatrixAnalyzer analyzer; + analyzer->analyze( constraintMatrix, m, 
n ); + + ENGINE_LOG( + Stringf( "Number of redundant rows: %u out of %u", analyzer->getRedundantRows().size(), m ) + .ascii() ); + + // Step 2: remove any equations corresponding to redundant rows + Set redundantRows = analyzer->getRedundantRows(); + + if ( !redundantRows.empty() ) + { + _preprocessedQuery->removeEquationsByIndex( redundantRows ); + m = equations.size(); + } +} + +void Engine::selectInitialVariablesForBasis( const double *constraintMatrix, + List &initialBasis, + List &basicRows ) +{ + /* + This method permutes rows and columns in the constraint matrix (prior + to the addition of auxiliary variables), in order to obtain a set of + column that constitue a lower triangular matrix. The variables + corresponding to the columns of this matrix join the initial basis. + + (It is possible that not enough variables are obtained this way, in which + case the initial basis will have to be augmented later). + */ + + const List &equations( _preprocessedQuery->getEquations() ); + + unsigned m = equations.size(); + unsigned n = _preprocessedQuery->getNumberOfVariables(); + + // Trivial case, or if a trivial basis is requested + if ( ( m == 0 ) || ( n == 0 ) || GlobalConfiguration::ONLY_AUX_INITIAL_BASIS ) + { + for ( unsigned i = 0; i < m; ++i ) + basicRows.append( i ); + + return; + } + + unsigned *nnzInRow = new unsigned[m]; + unsigned *nnzInColumn = new unsigned[n]; + + std::fill_n( nnzInRow, m, 0 ); + std::fill_n( nnzInColumn, n, 0 ); + + unsigned *columnOrdering = new unsigned[n]; + unsigned *rowOrdering = new unsigned[m]; + + for ( unsigned i = 0; i < m; ++i ) + rowOrdering[i] = i; + + for ( unsigned i = 0; i < n; ++i ) + columnOrdering[i] = i; + + // Initialize the counters + for ( unsigned i = 0; i < m; ++i ) + { + for ( unsigned j = 0; j < n; ++j ) + { + if ( !FloatUtils::isZero( constraintMatrix[i * n + j] ) ) + { + ++nnzInRow[i]; + ++nnzInColumn[j]; + } + } + } + + DEBUG( { + for ( unsigned i = 0; i < m; ++i ) + { + ASSERT( nnzInRow[i] > 0 ); + } + } 
); + + unsigned numExcluded = 0; + unsigned numTriangularRows = 0; + unsigned temp; + + while ( numExcluded + numTriangularRows < n ) + { + // Do we have a singleton row? + unsigned singletonRow = m; + for ( unsigned i = numTriangularRows; i < m; ++i ) + { + if ( nnzInRow[i] == 1 ) + { + singletonRow = i; + break; + } + } + + if ( singletonRow < m ) + { + // Have a singleton row! Swap it to the top and update counters + temp = rowOrdering[singletonRow]; + rowOrdering[singletonRow] = rowOrdering[numTriangularRows]; + rowOrdering[numTriangularRows] = temp; + + temp = nnzInRow[numTriangularRows]; + nnzInRow[numTriangularRows] = nnzInRow[singletonRow]; + nnzInRow[singletonRow] = temp; + + // Find the non-zero entry in the row and swap it to the diagonal + DEBUG( bool foundNonZero = false ); + for ( unsigned i = numTriangularRows; i < n - numExcluded; ++i ) + { + if ( !FloatUtils::isZero( constraintMatrix[rowOrdering[numTriangularRows] * n + + columnOrdering[i]] ) ) + { + temp = columnOrdering[i]; + columnOrdering[i] = columnOrdering[numTriangularRows]; + columnOrdering[numTriangularRows] = temp; + + temp = nnzInColumn[numTriangularRows]; + nnzInColumn[numTriangularRows] = nnzInColumn[i]; + nnzInColumn[i] = temp; + + DEBUG( foundNonZero = true ); + break; + } + } + + ASSERT( foundNonZero ); + + // Remove all entries under the diagonal entry from the row counters + for ( unsigned i = numTriangularRows + 1; i < m; ++i ) + { + if ( !FloatUtils::isZero( constraintMatrix[rowOrdering[i] * n + + columnOrdering[numTriangularRows]] ) ) + --nnzInRow[i]; + } + + ++numTriangularRows; + } + else + { + // No singleton rows. 
Exclude the densest column + unsigned maxDensity = nnzInColumn[numTriangularRows]; + unsigned column = numTriangularRows; + + for ( unsigned i = numTriangularRows; i < n - numExcluded; ++i ) + { + if ( nnzInColumn[i] > maxDensity ) + { + maxDensity = nnzInColumn[i]; + column = i; + } + } + + // Update the row counters to account for the excluded column + for ( unsigned i = numTriangularRows; i < m; ++i ) + { + double element = constraintMatrix[rowOrdering[i] * n + columnOrdering[column]]; + if ( !FloatUtils::isZero( element ) ) + { + ASSERT( nnzInRow[i] > 1 ); + --nnzInRow[i]; + } + } + + columnOrdering[column] = columnOrdering[n - 1 - numExcluded]; + nnzInColumn[column] = nnzInColumn[n - 1 - numExcluded]; + ++numExcluded; + } + } + + // Final basis: diagonalized columns + non-diagonalized rows + List result; + + for ( unsigned i = 0; i < numTriangularRows; ++i ) + { + initialBasis.append( columnOrdering[i] ); + } + + for ( unsigned i = numTriangularRows; i < m; ++i ) + { + basicRows.append( rowOrdering[i] ); + } + + // Cleanup + delete[] nnzInRow; + delete[] nnzInColumn; + delete[] columnOrdering; + delete[] rowOrdering; +} + +void Engine::addAuxiliaryVariables() +{ + List &equations( _preprocessedQuery->getEquations() ); + + unsigned m = equations.size(); + unsigned originalN = _preprocessedQuery->getNumberOfVariables(); + unsigned n = originalN + m; + + _preprocessedQuery->setNumberOfVariables( n ); + + // Add auxiliary variables to the equations and set their bounds + unsigned count = 0; + for ( auto &eq : equations ) + { + unsigned auxVar = originalN + count; + if ( _produceUNSATProofs ) + _preprocessedQuery->_lastAddendToAux.insert( eq._addends.back()._variable, auxVar ); + eq.addAddend( -1, auxVar ); + _preprocessedQuery->setLowerBound( auxVar, eq._scalar ); + _preprocessedQuery->setUpperBound( auxVar, eq._scalar ); + eq.setScalar( 0 ); + + ++count; + } +} + +void Engine::augmentInitialBasisIfNeeded( List &initialBasis, + const List &basicRows ) +{ + 
unsigned m = _preprocessedQuery->getEquations().size(); + unsigned n = _preprocessedQuery->getNumberOfVariables(); + unsigned originalN = n - m; + + if ( initialBasis.size() != m ) + { + for ( const auto &basicRow : basicRows ) + initialBasis.append( basicRow + originalN ); + } +} + +void Engine::initializeTableau( const double *constraintMatrix, const List &initialBasis ) +{ + const List &equations( _preprocessedQuery->getEquations() ); + unsigned m = equations.size(); + unsigned n = _preprocessedQuery->getNumberOfVariables(); + + _tableau->setDimensions( m, n ); + + adjustWorkMemorySize(); + + unsigned equationIndex = 0; + for ( const auto &equation : equations ) + { + _tableau->setRightHandSide( equationIndex, equation._scalar ); + ++equationIndex; + } + + // Populate constriant matrix + _tableau->setConstraintMatrix( constraintMatrix ); + + _tableau->registerToWatchAllVariables( _rowBoundTightener ); + _tableau->registerResizeWatcher( _rowBoundTightener ); + + _rowBoundTightener->setDimensions(); + + initializeBoundsAndConstraintWatchersInTableau( n ); + + _tableau->initializeTableau( initialBasis ); + + _costFunctionManager->initialize(); + _tableau->registerCostFunctionManager( _costFunctionManager ); + _activeEntryStrategy->initialize( _tableau ); +} + +void Engine::initializeBoundsAndConstraintWatchersInTableau( unsigned numberOfVariables ) +{ + _plConstraints = _preprocessedQuery->getPiecewiseLinearConstraints(); + for ( const auto &constraint : _plConstraints ) + { + constraint->registerAsWatcher( _tableau ); + constraint->setStatistics( &_statistics ); + + // Assuming aux var is use, add the constraint's auxiliary variable assigned to it in the + // tableau, to the constraint + if ( _produceUNSATProofs ) + for ( unsigned var : constraint->getNativeAuxVars() ) + if ( _preprocessedQuery->_lastAddendToAux.exists( var ) ) + constraint->addTableauAuxVar( _preprocessedQuery->_lastAddendToAux.at( var ), + var ); + } + + _nlConstraints = 
_preprocessedQuery->getNonlinearConstraints(); + for ( const auto &constraint : _nlConstraints ) + { + constraint->registerAsWatcher( _tableau ); + constraint->setStatistics( &_statistics ); + } + + for ( unsigned i = 0; i < numberOfVariables; ++i ) + { + _tableau->setLowerBound( i, _preprocessedQuery->getLowerBound( i ) ); + _tableau->setUpperBound( i, _preprocessedQuery->getUpperBound( i ) ); + } + _boundManager.storeLocalBounds(); + + _statistics.setUnsignedAttribute( Statistics::NUM_PL_CONSTRAINTS, _plConstraints.size() ); +} + +void Engine::initializeNetworkLevelReasoning() +{ + _networkLevelReasoner = _preprocessedQuery->getNetworkLevelReasoner(); + + if ( _networkLevelReasoner ) + { + _networkLevelReasoner->computeSuccessorLayers(); + _networkLevelReasoner->setTableau( _tableau ); + if ( Options::get()->getBool( Options::DUMP_TOPOLOGY ) ) + { + _networkLevelReasoner->dumpTopology( false ); + std::cout << std::endl; + } + } +} + +bool Engine::processInputQuery( const IQuery &inputQuery, bool preprocess ) +{ + ENGINE_LOG( "processInputQuery starting\n" ); + struct timespec start = TimeUtils::sampleMicro(); + + try + { + invokePreprocessor( inputQuery, preprocess ); + if ( _verbosity > 1 ) + printInputBounds( inputQuery ); + initializeNetworkLevelReasoning(); + if ( preprocess ) + { + performSymbolicBoundTightening( &( *_preprocessedQuery ) ); + performSimulation(); + performMILPSolverBoundedTightening( &( *_preprocessedQuery ) ); + performAdditionalBackwardAnalysisIfNeeded(); + } + + if ( GlobalConfiguration::PL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING ) + for ( auto &plConstraint : _preprocessedQuery->getPiecewiseLinearConstraints() ) + plConstraint->addAuxiliaryEquationsAfterPreprocessing( *_preprocessedQuery ); + + if ( GlobalConfiguration::NL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING ) + for ( auto &nlConstraint : _preprocessedQuery->getNonlinearConstraints() ) + nlConstraint->addAuxiliaryEquationsAfterPreprocessing( *_preprocessedQuery ); 
+ + if ( _produceUNSATProofs ) + { + for ( auto &plConstraint : _preprocessedQuery->getPiecewiseLinearConstraints() ) + { + if ( !UNSATCertificateUtils::getSupportedActivations().exists( + plConstraint->getType() ) ) + { + _produceUNSATProofs = false; + Options::get()->setBool( Options::PRODUCE_PROOFS, false ); + String activationType = + plConstraint->serializeToString().tokenize( "," ).back(); + printf( + "Turning off proof production since activation %s is not yet supported\n", + activationType.ascii() ); + break; + } + } + } + + if ( _lpSolverType == LPSolverType::NATIVE ) + { + double *constraintMatrix = createConstraintMatrix(); + removeRedundantEquations( constraintMatrix ); + + // The equations have changed, recreate the constraint matrix + delete[] constraintMatrix; + constraintMatrix = createConstraintMatrix(); + + List initialBasis; + List basicRows; + selectInitialVariablesForBasis( constraintMatrix, initialBasis, basicRows ); + addAuxiliaryVariables(); + augmentInitialBasisIfNeeded( initialBasis, basicRows ); + + storeEquationsInDegradationChecker(); + + // The equations have changed, recreate the constraint matrix + delete[] constraintMatrix; + constraintMatrix = createConstraintMatrix(); + + unsigned n = _preprocessedQuery->getNumberOfVariables(); + _boundManager.initialize( n ); + + initializeTableau( constraintMatrix, initialBasis ); + _boundManager.initializeBoundExplainer( n, _tableau->getM() ); + delete[] constraintMatrix; + + if ( _produceUNSATProofs ) + { + _UNSATCertificate = new UnsatCertificateNode( NULL, PiecewiseLinearCaseSplit() ); + _UNSATCertificateCurrentPointer->set( _UNSATCertificate ); + _UNSATCertificate->setVisited(); + _groundBoundManager.initialize( n ); + + for ( unsigned i = 0; i < n; ++i ) + { + _groundBoundManager.setUpperBound( i, _preprocessedQuery->getUpperBound( i ) ); + _groundBoundManager.setLowerBound( i, _preprocessedQuery->getLowerBound( i ) ); + } + } + } + else + { + ASSERT( _lpSolverType == LPSolverType::GUROBI 
); + + ASSERT( GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH == true ); + + if ( _verbosity > 0 ) + printf( "Using Gurobi to solve LP...\n" ); + + unsigned n = _preprocessedQuery->getNumberOfVariables(); + unsigned m = _preprocessedQuery->getEquations().size(); + // Only use BoundManager to store the bounds. + _boundManager.initialize( n ); + _tableau->setDimensions( m, n ); + initializeBoundsAndConstraintWatchersInTableau( n ); + } + + for ( const auto &constraint : _plConstraints ) + constraint->registerTableau( _tableau ); + for ( const auto &constraint : _nlConstraints ) + constraint->registerTableau( _tableau ); + + if ( _networkLevelReasoner && Options::get()->getBool( Options::DUMP_BOUNDS ) ) + _networkLevelReasoner->dumpBounds(); + + if ( GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH ) + { + _soiManager = std::unique_ptr( + new SumOfInfeasibilitiesManager( *_preprocessedQuery, *_tableau ) ); + _soiManager->setStatistics( &_statistics ); + } + + if ( GlobalConfiguration::WARM_START ) + warmStart(); + + decideBranchingHeuristics(); + + struct timespec end = TimeUtils::sampleMicro(); + _statistics.setLongAttribute( Statistics::PREPROCESSING_TIME_MICRO, + TimeUtils::timePassed( start, end ) ); + + if ( !_tableau->allBoundsValid() ) + { + // Some variable bounds are invalid, so the query is unsat + throw InfeasibleQueryException(); + } + } + catch ( const InfeasibleQueryException & ) + { + ENGINE_LOG( "processInputQuery done\n" ); + + struct timespec end = TimeUtils::sampleMicro(); + _statistics.setLongAttribute( Statistics::PREPROCESSING_TIME_MICRO, + TimeUtils::timePassed( start, end ) ); + + _exitCode = Engine::UNSAT; + return false; + } + + ENGINE_LOG( "processInputQuery done\n" ); + + DEBUG( { + // Initially, all constraints should be active + for ( const auto &plc : _plConstraints ) + { + ASSERT( plc->isActive() ); + } + } ); + + _smtCore.storeDebuggingSolution( _preprocessedQuery->_debuggingSolution ); + return true; +} + +void 
Engine::performMILPSolverBoundedTightening( Query *inputQuery ) +{ + if ( _networkLevelReasoner && Options::get()->gurobiEnabled() ) + { + // Obtain from and store bounds into inputquery if it is not null. + if ( inputQuery ) + _networkLevelReasoner->obtainCurrentBounds( *inputQuery ); + else + _networkLevelReasoner->obtainCurrentBounds(); + + // TODO: Remove this block after getting ready to support sigmoid with MILP Bound + // Tightening. + if ( ( _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::MILP_ENCODING || + _milpSolverBoundTighteningType == + MILPSolverBoundTighteningType::MILP_ENCODING_INCREMENTAL || + _milpSolverBoundTighteningType == + MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION ) && + _preprocessedQuery->getNonlinearConstraints().size() > 0 ) + throw MarabouError( MarabouError::FEATURE_NOT_YET_SUPPORTED, + "Marabou doesn't support sigmoid with MILP Bound Tightening" ); + + switch ( _milpSolverBoundTighteningType ) + { + case MILPSolverBoundTighteningType::LP_RELAXATION: + case MILPSolverBoundTighteningType::LP_RELAXATION_INCREMENTAL: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX: + _networkLevelReasoner->lpRelaxationPropagation(); + break; + case MILPSolverBoundTighteningType::MILP_ENCODING: + case MILPSolverBoundTighteningType::MILP_ENCODING_INCREMENTAL: + _networkLevelReasoner->MILPPropagation(); + break; + case MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION: + _networkLevelReasoner->iterativePropagation(); + break; + case MILPSolverBoundTighteningType::NONE: + return; + } + List tightenings; + _networkLevelReasoner->getConstraintTightenings( tightenings ); + + + if ( inputQuery ) + { + for ( const auto &tightening : tightenings ) + { + if ( tightening._type == Tightening::LB && + FloatUtils::gt( 
tightening._value, + inputQuery->getLowerBound( tightening._variable ) ) ) + inputQuery->setLowerBound( tightening._variable, tightening._value ); + if ( tightening._type == Tightening::UB && + FloatUtils::lt( tightening._value, + inputQuery->getUpperBound( tightening._variable ) ) ) + inputQuery->setUpperBound( tightening._variable, tightening._value ); + } + } + else + { + for ( const auto &tightening : tightenings ) + { + if ( tightening._type == Tightening::LB ) + _tableau->tightenLowerBound( tightening._variable, tightening._value ); + + else if ( tightening._type == Tightening::UB ) + _tableau->tightenUpperBound( tightening._variable, tightening._value ); + } + } + } +} + +void Engine::performAdditionalBackwardAnalysisIfNeeded() +{ + if ( _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE || + _milpSolverBoundTighteningType == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE ) + { + unsigned tightened = performSymbolicBoundTightening( &( *_preprocessedQuery ) ); + if ( _verbosity > 0 ) + printf( "Backward analysis tightened %u bounds\n", tightened ); + while ( tightened && + _milpSolverBoundTighteningType == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE && + GlobalConfiguration::MAX_ROUNDS_OF_BACKWARD_ANALYSIS ) + { + performMILPSolverBoundedTightening( &( *_preprocessedQuery ) ); + tightened = performSymbolicBoundTightening( &( *_preprocessedQuery ) ); + if ( _verbosity > 0 ) + printf( "Backward analysis tightened %u bounds\n", tightened ); + } + } +} + +void Engine::performMILPSolverBoundedTighteningForSingleLayer( unsigned targetIndex ) +{ + if ( _produceUNSATProofs ) + return; + + if ( _networkLevelReasoner && _isGurobyEnabled && _performLpTighteningAfterSplit && + _milpSolverBoundTighteningType != MILPSolverBoundTighteningType::NONE ) + { + _networkLevelReasoner->obtainCurrentBounds(); + _networkLevelReasoner->clearConstraintTightenings(); + + switch ( _milpSolverBoundTighteningType ) + { + 
case MILPSolverBoundTighteningType::LP_RELAXATION: + _networkLevelReasoner->LPTighteningForOneLayer( targetIndex ); + break; + case MILPSolverBoundTighteningType::LP_RELAXATION_INCREMENTAL: + return; + case MILPSolverBoundTighteningType::MILP_ENCODING: + _networkLevelReasoner->MILPTighteningForOneLayer( targetIndex ); + break; + case MILPSolverBoundTighteningType::MILP_ENCODING_INCREMENTAL: + return; + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP: + case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX: + case MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION: + case MILPSolverBoundTighteningType::NONE: + return; + } + List tightenings; + _networkLevelReasoner->getConstraintTightenings( tightenings ); + + for ( const auto &tightening : tightenings ) + { + if ( tightening._type == Tightening::LB ) + _tableau->tightenLowerBound( tightening._variable, tightening._value ); + + else if ( tightening._type == Tightening::UB ) + _tableau->tightenUpperBound( tightening._variable, tightening._value ); + } + } +} + +void Engine::extractSolution( IQuery &inputQuery, Preprocessor *preprocessor ) +{ + Preprocessor *preprocessorInUse = nullptr; + if ( preprocessor != nullptr ) + preprocessorInUse = preprocessor; + else if ( _preprocessingEnabled ) + preprocessorInUse = &_preprocessor; + + for ( unsigned i = 0; i < inputQuery.getNumberOfVariables(); ++i ) + { + if ( preprocessorInUse ) + { + // Symbolically fixed variables are skipped. They will be re-constructed in the end. + if ( preprocessorInUse->variableIsUnusedAndSymbolicallyFixed( i ) ) + continue; + + // Has the variable been merged into another? + unsigned variable = i; + while ( preprocessorInUse->variableIsMerged( variable ) ) + variable = preprocessorInUse->getMergedIndex( variable ); + + // Fixed variables are easy: return the value they've been fixed to. 
+ if ( preprocessorInUse->variableIsFixed( variable ) ) + { + inputQuery.setSolutionValue( i, preprocessorInUse->getFixedValue( variable ) ); + continue; + } + + // We know which variable to look for, but it may have been assigned + // a new index, due to variable elimination + variable = preprocessorInUse->getNewIndex( variable ); + + // Finally, set the assigned value + inputQuery.setSolutionValue( i, _tableau->getValue( variable ) ); + } + else + { + inputQuery.setSolutionValue( i, _tableau->getValue( i ) ); + } + } + + if ( preprocessorInUse ) + { + /* + Recover the assignment of eliminated neurons (e.g., due to merging of WS layers) + */ + preprocessorInUse->setSolutionValuesOfEliminatedNeurons( inputQuery ); + } +} + +bool Engine::allVarsWithinBounds() const +{ + if ( _lpSolverType == LPSolverType::GUROBI ) + { + ASSERT( _gurobi ); + return _gurobi->haveFeasibleSolution(); + } + else + return !_tableau->existsBasicOutOfBounds(); +} + +void Engine::collectViolatedPlConstraints() +{ + _violatedPlConstraints.clear(); + for ( const auto &constraint : _plConstraints ) + { + if ( constraint->isActive() && !constraint->satisfied() ) + _violatedPlConstraints.append( constraint ); + } +} + +bool Engine::allPlConstraintsHold() +{ + return _violatedPlConstraints.empty(); +} + +bool Engine::allNonlinearConstraintsHold() +{ + for ( const auto &constraint : _nlConstraints ) + { + if ( !constraint->satisfied() ) + return false; + } + return true; +} + +bool Engine::hasBranchingCandidate() +{ + for ( const auto &constraint : _plConstraints ) + { + if ( constraint->isActive() && !constraint->phaseFixed() ) + return true; + } + return false; +} + +void Engine::selectViolatedPlConstraint() +{ + ASSERT( !_violatedPlConstraints.empty() ); + + _plConstraintToFix = _smtCore.chooseViolatedConstraintForFixing( _violatedPlConstraints ); + + ASSERT( _plConstraintToFix ); +} + +void Engine::reportPlViolation() +{ + _smtCore.reportViolatedConstraint( _plConstraintToFix ); +} + +void 
Engine::storeState( EngineState &state, TableauStateStorageLevel level ) const +{ + _tableau->storeState( state._tableauState, level ); + state._tableauStateStorageLevel = level; + + for ( const auto &constraint : _plConstraints ) + state._plConstraintToState[constraint] = constraint->duplicateConstraint(); + + state._numPlConstraintsDisabledByValidSplits = _numPlConstraintsDisabledByValidSplits; +} + +void Engine::restoreState( const EngineState &state ) +{ + ENGINE_LOG( "Restore state starting" ); + + if ( state._tableauStateStorageLevel == TableauStateStorageLevel::STORE_NONE ) + throw MarabouError( MarabouError::RESTORING_ENGINE_FROM_INVALID_STATE ); + + ENGINE_LOG( "\tRestoring tableau state" ); + _tableau->restoreState( state._tableauState, state._tableauStateStorageLevel ); + + ENGINE_LOG( "\tRestoring constraint states" ); + for ( auto &constraint : _plConstraints ) + { + if ( !state._plConstraintToState.exists( constraint ) ) + throw MarabouError( MarabouError::MISSING_PL_CONSTRAINT_STATE ); + + constraint->restoreState( state._plConstraintToState[constraint] ); + } + + _numPlConstraintsDisabledByValidSplits = state._numPlConstraintsDisabledByValidSplits; + + if ( _lpSolverType == LPSolverType::NATIVE ) + { + // Make sure the data structures are initialized to the correct size + _rowBoundTightener->setDimensions(); + adjustWorkMemorySize(); + _activeEntryStrategy->resizeHook( _tableau ); + _costFunctionManager->initialize(); + } + + // Reset the violation counts in the SMT core + _smtCore.resetSplitConditions(); +} + +void Engine::setNumPlConstraintsDisabledByValidSplits( unsigned numConstraints ) +{ + _numPlConstraintsDisabledByValidSplits = numConstraints; +} + +bool Engine::attemptToMergeVariables( unsigned x1, unsigned x2 ) +{ + /* + First, we need to ensure that the variables are both non-basic. 
+ */ + + unsigned n = _tableau->getN(); + unsigned m = _tableau->getM(); + + if ( _tableau->isBasic( x1 ) ) + { + TableauRow x1Row( n - m ); + _tableau->getTableauRow( _tableau->variableToIndex( x1 ), &x1Row ); + + bool found = false; + double bestCoefficient = 0.0; + unsigned nonBasic = 0; + for ( unsigned i = 0; i < n - m; ++i ) + { + if ( x1Row._row[i]._var != x2 ) + { + double contender = FloatUtils::abs( x1Row._row[i]._coefficient ); + if ( FloatUtils::gt( contender, bestCoefficient ) ) + { + found = true; + nonBasic = x1Row._row[i]._var; + bestCoefficient = contender; + } + } + } + + if ( !found ) + return false; + + _tableau->setEnteringVariableIndex( _tableau->variableToIndex( nonBasic ) ); + _tableau->setLeavingVariableIndex( _tableau->variableToIndex( x1 ) ); + + // Make sure the change column and pivot row are up-to-date - strategies + // such as projected steepest edge need these for their internal updates. + _tableau->computeChangeColumn(); + _tableau->computePivotRow(); + + _activeEntryStrategy->prePivotHook( _tableau, false ); + _tableau->performDegeneratePivot(); + _activeEntryStrategy->postPivotHook( _tableau, false ); + } + + if ( _tableau->isBasic( x2 ) ) + { + TableauRow x2Row( n - m ); + _tableau->getTableauRow( _tableau->variableToIndex( x2 ), &x2Row ); + + bool found = false; + double bestCoefficient = 0.0; + unsigned nonBasic = 0; + for ( unsigned i = 0; i < n - m; ++i ) + { + if ( x2Row._row[i]._var != x1 ) + { + double contender = FloatUtils::abs( x2Row._row[i]._coefficient ); + if ( FloatUtils::gt( contender, bestCoefficient ) ) + { + found = true; + nonBasic = x2Row._row[i]._var; + bestCoefficient = contender; + } + } + } + + if ( !found ) + return false; + + _tableau->setEnteringVariableIndex( _tableau->variableToIndex( nonBasic ) ); + _tableau->setLeavingVariableIndex( _tableau->variableToIndex( x2 ) ); + + // Make sure the change column and pivot row are up-to-date - strategies + // such as projected steepest edge need these for their 
internal updates. + _tableau->computeChangeColumn(); + _tableau->computePivotRow(); + + _activeEntryStrategy->prePivotHook( _tableau, false ); + _tableau->performDegeneratePivot(); + _activeEntryStrategy->postPivotHook( _tableau, false ); + } + + // Both variables are now non-basic, so we can merge their columns + _tableau->mergeColumns( x1, x2 ); + DEBUG( _tableau->verifyInvariants() ); + + // Reset the entry strategy + _activeEntryStrategy->initialize( _tableau ); + + return true; +} + +void Engine::applySplit( const PiecewiseLinearCaseSplit &split ) +{ + ENGINE_LOG( "" ); + ENGINE_LOG( "Applying a split. " ); + + DEBUG( _tableau->verifyInvariants() ); + + List bounds = split.getBoundTightenings(); + List equations = split.getEquations(); + + // We assume that case splits only apply new bounds but do not apply + // new equations. This can always be made possible. + if ( _lpSolverType != LPSolverType::NATIVE && equations.size() > 0 ) + throw MarabouError( MarabouError::FEATURE_NOT_YET_SUPPORTED, + "Can only update bounds when using non-native" + "simplex engine!" ); + + for ( auto &equation : equations ) + { + /* + First, adjust the equation if any variables have been merged. 
+ E.g., if the equation is x1 + x2 + x3 = 0, and x1 and x2 have been + merged, the equation becomes 2x1 + x3 = 0 + */ + for ( auto &addend : equation._addends ) + addend._variable = _tableau->getVariableAfterMerging( addend._variable ); + + List::iterator addend; + List::iterator otherAddend; + + addend = equation._addends.begin(); + while ( addend != equation._addends.end() ) + { + otherAddend = addend; + ++otherAddend; + + while ( otherAddend != equation._addends.end() ) + { + if ( otherAddend->_variable == addend->_variable ) + { + addend->_coefficient += otherAddend->_coefficient; + otherAddend = equation._addends.erase( otherAddend ); + } + else + ++otherAddend; + } + + if ( FloatUtils::isZero( addend->_coefficient ) ) + addend = equation._addends.erase( addend ); + else + ++addend; + } + + /* + In the general case, we just add the new equation to the tableau. + However, we also support a very common case: equations of the form + x1 = x2, which are common, e.g., with ReLUs. For these equations we + may be able to merge two columns of the tableau. 
+ */ + unsigned x1, x2; + bool canMergeColumns = + // Only if the flag is on + GlobalConfiguration::USE_COLUMN_MERGING_EQUATIONS && + // Only if the equation has the correct form + equation.isVariableMergingEquation( x1, x2 ) && + // And only if the variables are not out of bounds + ( !_tableau->isBasic( x1 ) || + !_tableau->basicOutOfBounds( _tableau->variableToIndex( x1 ) ) ) && + ( !_tableau->isBasic( x2 ) || + !_tableau->basicOutOfBounds( _tableau->variableToIndex( x2 ) ) ); + + bool columnsSuccessfullyMerged = false; + if ( canMergeColumns ) + columnsSuccessfullyMerged = attemptToMergeVariables( x1, x2 ); + + if ( !columnsSuccessfullyMerged ) + { + // General case: add a new equation to the tableau + unsigned auxVariable = _tableau->addEquation( equation ); + _activeEntryStrategy->resizeHook( _tableau ); + + switch ( equation._type ) + { + case Equation::GE: + bounds.append( Tightening( auxVariable, 0.0, Tightening::UB ) ); + break; + + case Equation::LE: + bounds.append( Tightening( auxVariable, 0.0, Tightening::LB ) ); + break; + + case Equation::EQ: + bounds.append( Tightening( auxVariable, 0.0, Tightening::LB ) ); + bounds.append( Tightening( auxVariable, 0.0, Tightening::UB ) ); + break; + + default: + ASSERT( false ); + break; + } + } + } + + if ( _lpSolverType == LPSolverType::NATIVE ) + { + adjustWorkMemorySize(); + } + + for ( auto &bound : bounds ) + { + unsigned variable = _tableau->getVariableAfterMerging( bound._variable ); + + if ( bound._type == Tightening::LB ) + { + ENGINE_LOG( + Stringf( "x%u: lower bound set to %.3lf", variable, bound._value ).ascii() ); + if ( _produceUNSATProofs && + FloatUtils::gt( bound._value, _boundManager.getLowerBound( bound._variable ) ) ) + { + _boundManager.resetExplanation( variable, Tightening::LB ); + updateGroundLowerBound( variable, bound._value ); + _boundManager.tightenLowerBound( variable, bound._value ); + } + else if ( !_produceUNSATProofs ) + _boundManager.tightenLowerBound( variable, bound._value ); + 
} + else + { + ENGINE_LOG( + Stringf( "x%u: upper bound set to %.3lf", variable, bound._value ).ascii() ); + if ( _produceUNSATProofs && + FloatUtils::lt( bound._value, _boundManager.getUpperBound( bound._variable ) ) ) + { + _boundManager.resetExplanation( variable, Tightening::UB ); + updateGroundUpperBound( variable, bound._value ); + _boundManager.tightenUpperBound( variable, bound._value ); + } + else if ( !_produceUNSATProofs ) + _boundManager.tightenUpperBound( variable, bound._value ); + } + } + + if ( _produceUNSATProofs && _UNSATCertificateCurrentPointer ) + ( **_UNSATCertificateCurrentPointer ).setVisited(); + + DEBUG( _tableau->verifyInvariants() ); + ENGINE_LOG( "Done with split\n" ); +} + +void Engine::applyBoundTightenings() +{ + List tightenings; + _boundManager.getTightenings( tightenings ); + + for ( const auto &tightening : tightenings ) + { + if ( tightening._type == Tightening::LB ) + _tableau->tightenLowerBound( tightening._variable, tightening._value ); + else + _tableau->tightenUpperBound( tightening._variable, tightening._value ); + } +} + +void Engine::applyAllRowTightenings() +{ + applyBoundTightenings(); +} + +void Engine::applyAllConstraintTightenings() +{ + applyBoundTightenings(); +} + +void Engine::applyAllBoundTightenings() +{ + struct timespec start = TimeUtils::sampleMicro(); + + if ( _lpSolverType == LPSolverType::NATIVE ) + applyAllRowTightenings(); + applyAllConstraintTightenings(); + + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TOTAL_TIME_APPLYING_STORED_TIGHTENINGS_MICRO, + TimeUtils::timePassed( start, end ) ); +} + +bool Engine::applyAllValidConstraintCaseSplits() +{ + struct timespec start = TimeUtils::sampleMicro(); + + bool appliedSplit = false; + for ( auto &constraint : _plConstraints ) + if ( applyValidConstraintCaseSplit( constraint ) ) + appliedSplit = true; + + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( 
Statistics::TOTAL_TIME_PERFORMING_VALID_CASE_SPLITS_MICRO, + TimeUtils::timePassed( start, end ) ); + + return appliedSplit; +} + +bool Engine::applyValidConstraintCaseSplit( PiecewiseLinearConstraint *constraint ) +{ + if ( constraint->isActive() && constraint->phaseFixed() ) + { + String constraintString; + constraint->dump( constraintString ); + ENGINE_LOG( Stringf( "A constraint has become valid. Dumping constraint: %s", + constraintString.ascii() ) + .ascii() ); + + constraint->setActiveConstraint( false ); + PiecewiseLinearCaseSplit validSplit = constraint->getValidCaseSplit(); + _smtCore.recordImpliedValidSplit( validSplit ); + applySplit( validSplit ); + + if ( _soiManager ) + _soiManager->removeCostComponentFromHeuristicCost( constraint ); + ++_numPlConstraintsDisabledByValidSplits; + + return true; + } + + return false; +} + +bool Engine::shouldCheckDegradation() +{ + return _statistics.getLongAttribute( Statistics::NUM_MAIN_LOOP_ITERATIONS ) % + GlobalConfiguration::DEGRADATION_CHECKING_FREQUENCY == + 0; +} + +bool Engine::highDegradation() +{ + struct timespec start = TimeUtils::sampleMicro(); + + double degradation = _degradationChecker.computeDegradation( *_tableau ); + _statistics.setDoubleAttribute( Statistics::CURRENT_DEGRADATION, degradation ); + if ( FloatUtils::gt( degradation, + _statistics.getDoubleAttribute( Statistics::MAX_DEGRADATION ) ) ) + _statistics.setDoubleAttribute( Statistics::MAX_DEGRADATION, degradation ); + + bool result = FloatUtils::gt( degradation, GlobalConfiguration::DEGRADATION_THRESHOLD ); + + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TOTAL_TIME_DEGRADATION_CHECKING, + TimeUtils::timePassed( start, end ) ); + + // Debug + if ( result ) + printf( "High degradation found!\n" ); + // + + return result; +} + +void Engine::tightenBoundsOnConstraintMatrix() +{ + struct timespec start = TimeUtils::sampleMicro(); + + if ( _statistics.getLongAttribute( 
Statistics::NUM_MAIN_LOOP_ITERATIONS ) % + GlobalConfiguration::BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY == + 0 ) + { + _rowBoundTightener->examineConstraintMatrix( true ); + _statistics.incLongAttribute( Statistics::NUM_BOUND_TIGHTENINGS_ON_CONSTRAINT_MATRIX ); + } + + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TOTAL_TIME_CONSTRAINT_MATRIX_BOUND_TIGHTENING_MICRO, + TimeUtils::timePassed( start, end ) ); +} + +void Engine::explicitBasisBoundTightening() +{ + struct timespec start = TimeUtils::sampleMicro(); + + bool saturation = GlobalConfiguration::EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION; + + _statistics.incLongAttribute( Statistics::NUM_BOUND_TIGHTENINGS_ON_EXPLICIT_BASIS ); + + switch ( GlobalConfiguration::EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE ) + { + case GlobalConfiguration::COMPUTE_INVERTED_BASIS_MATRIX: + _rowBoundTightener->examineInvertedBasisMatrix( saturation ); + break; + + case GlobalConfiguration::USE_IMPLICIT_INVERTED_BASIS_MATRIX: + _rowBoundTightener->examineImplicitInvertedBasisMatrix( saturation ); + break; + + case GlobalConfiguration::DISABLE_EXPLICIT_BASIS_TIGHTENING: + break; + } + + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TOTAL_TIME_EXPLICIT_BASIS_BOUND_TIGHTENING_MICRO, + TimeUtils::timePassed( start, end ) ); +} + +void Engine::performPrecisionRestoration( PrecisionRestorer::RestoreBasics restoreBasics ) +{ + struct timespec start = TimeUtils::sampleMicro(); + + // debug + double before = _degradationChecker.computeDegradation( *_tableau ); + // + + _precisionRestorer.restorePrecision( *this, *_tableau, _smtCore, restoreBasics ); + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TOTAL_TIME_PRECISION_RESTORATION, + TimeUtils::timePassed( start, end ) ); + + _statistics.incUnsignedAttribute( Statistics::NUM_PRECISION_RESTORATIONS ); + + // debug + double after = 
_degradationChecker.computeDegradation( *_tableau ); + if ( _verbosity > 0 ) + printf( "Performing precision restoration. Degradation before: %.15lf. After: %.15lf\n", + before, + after ); + // + + if ( highDegradation() && ( restoreBasics == PrecisionRestorer::RESTORE_BASICS ) ) + { + // First round, with basic restoration, still resulted in high degradation. + // Try again! + start = TimeUtils::sampleMicro(); + _precisionRestorer.restorePrecision( + *this, *_tableau, _smtCore, PrecisionRestorer::DO_NOT_RESTORE_BASICS ); + end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TOTAL_TIME_PRECISION_RESTORATION, + TimeUtils::timePassed( start, end ) ); + _statistics.incUnsignedAttribute( Statistics::NUM_PRECISION_RESTORATIONS ); + + // debug + double afterSecond = _degradationChecker.computeDegradation( *_tableau ); + if ( _verbosity > 0 ) + printf( + "Performing 2nd precision restoration. Degradation before: %.15lf. After: %.15lf\n", + after, + afterSecond ); + + if ( highDegradation() ) + throw MarabouError( MarabouError::RESTORATION_FAILED_TO_RESTORE_PRECISION ); + } +} + +void Engine::storeInitialEngineState() +{ + if ( !_initialStateStored ) + { + _precisionRestorer.storeInitialEngineState( *this ); + _initialStateStored = true; + } +} + +bool Engine::basisRestorationNeeded() const +{ + return _basisRestorationRequired == Engine::STRONG_RESTORATION_NEEDED || + _basisRestorationRequired == Engine::WEAK_RESTORATION_NEEDED; +} + +const Statistics *Engine::getStatistics() const +{ + return &_statistics; +} + +Query *Engine::getQuery() +{ + return &( *_preprocessedQuery ); +} + +void Engine::checkBoundCompliancyWithDebugSolution() +{ + if ( _smtCore.checkSkewFromDebuggingSolution() ) + { + // The stack is compliant, we should not have learned any non-compliant bounds + for ( const auto &var : _preprocessedQuery->_debuggingSolution ) + { + // printf( "Looking at var %u\n", var.first ); + + if ( FloatUtils::gt( _tableau->getLowerBound( var.first 
), var.second, 1e-5 ) ) + { + printf( "Error! The stack is compliant, but learned an non-compliant bound: " + "Solution for x%u is %.15lf, but learned lower bound %.15lf\n", + var.first, + var.second, + _tableau->getLowerBound( var.first ) ); + + throw MarabouError( MarabouError::DEBUGGING_ERROR ); + } + + if ( FloatUtils::lt( _tableau->getUpperBound( var.first ), var.second, 1e-5 ) ) + { + printf( "Error! The stack is compliant, but learned an non-compliant bound: " + "Solution for %u is %.15lf, but learned upper bound %.15lf\n", + var.first, + var.second, + _tableau->getUpperBound( var.first ) ); + + throw MarabouError( MarabouError::DEBUGGING_ERROR ); + } + } + } +} + +void Engine::quitSignal() +{ + _quitRequested = true; +} + +Engine::ExitCode Engine::getExitCode() const +{ + return _exitCode; +} + +std::atomic_bool *Engine::getQuitRequested() +{ + return &_quitRequested; +} + +List Engine::getInputVariables() const +{ + return _preprocessedQuery->getInputVariables(); +} + +void Engine::performSimulation() +{ + if ( _simulationSize == 0 || !_networkLevelReasoner || + _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::NONE || + _produceUNSATProofs ) + { + ENGINE_LOG( Stringf( "Skip simulation..." 
).ascii() ); + return; + } + + // outer vector is for neuron + // inner vector is for simulation value + Vector> simulations; + + std::mt19937 mt( GlobalConfiguration::SIMULATION_RANDOM_SEED ); + + for ( unsigned i = 0; i < _networkLevelReasoner->getLayer( 0 )->getSize(); ++i ) + { + std::uniform_real_distribution distribution( + _networkLevelReasoner->getLayer( 0 )->getLb( i ), + _networkLevelReasoner->getLayer( 0 )->getUb( i ) ); + Vector simulationInput( _simulationSize ); + + for ( unsigned j = 0; j < _simulationSize; ++j ) + simulationInput[j] = distribution( mt ); + simulations.append( simulationInput ); + } + _networkLevelReasoner->simulate( &simulations ); +} + +unsigned Engine::performSymbolicBoundTightening( Query *inputQuery ) +{ + if ( _symbolicBoundTighteningType == SymbolicBoundTighteningType::NONE || + ( !_networkLevelReasoner ) || _produceUNSATProofs ) + return 0; + + struct timespec start = TimeUtils::sampleMicro(); + + unsigned numTightenedBounds = 0; + + // Step 1: tell the NLR about the current bounds + if ( inputQuery ) + { + // Obtain from and store bounds into inputquery if it is not null. + _networkLevelReasoner->obtainCurrentBounds( *inputQuery ); + } + else + { + // Get bounds from Tableau. 
+ _networkLevelReasoner->obtainCurrentBounds(); + } + + // Step 2: perform SBT + if ( _symbolicBoundTighteningType == SymbolicBoundTighteningType::SYMBOLIC_BOUND_TIGHTENING ) + _networkLevelReasoner->symbolicBoundPropagation(); + else if ( _symbolicBoundTighteningType == SymbolicBoundTighteningType::DEEP_POLY ) + _networkLevelReasoner->deepPolyPropagation(); + + // Step 3: Extract the bounds + List tightenings; + _networkLevelReasoner->getConstraintTightenings( tightenings ); + + if ( inputQuery ) + { + for ( const auto &tightening : tightenings ) + { + if ( tightening._type == Tightening::LB && + FloatUtils::gt( tightening._value, + inputQuery->getLowerBound( tightening._variable ) ) ) + { + inputQuery->setLowerBound( tightening._variable, tightening._value ); + ++numTightenedBounds; + } + + if ( tightening._type == Tightening::UB && + FloatUtils::lt( tightening._value, + inputQuery->getUpperBound( tightening._variable ) ) ) + { + inputQuery->setUpperBound( tightening._variable, tightening._value ); + ++numTightenedBounds; + } + } + } + else + { + for ( const auto &tightening : tightenings ) + { + if ( tightening._type == Tightening::LB && + FloatUtils::gt( tightening._value, + _tableau->getLowerBound( tightening._variable ) ) ) + { + _tableau->tightenLowerBound( tightening._variable, tightening._value ); + ++numTightenedBounds; + } + + if ( tightening._type == Tightening::UB && + FloatUtils::lt( tightening._value, + _tableau->getUpperBound( tightening._variable ) ) ) + { + _tableau->tightenUpperBound( tightening._variable, tightening._value ); + ++numTightenedBounds; + } + } + } + + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TOTAL_TIME_PERFORMING_SYMBOLIC_BOUND_TIGHTENING, + TimeUtils::timePassed( start, end ) ); + _statistics.incLongAttribute( Statistics::NUM_TIGHTENINGS_FROM_SYMBOLIC_BOUND_TIGHTENING, + numTightenedBounds ); + return numTightenedBounds; +} + +bool Engine::shouldExitDueToTimeout( double timeout ) 
const +{ + // A timeout value of 0 means no time limit + if ( timeout == 0 ) + return false; + + return static_cast( _statistics.getTotalTimeInMicro() ) / MICROSECONDS_TO_SECONDS > + timeout; +} + +void Engine::preContextPushHook() +{ + struct timespec start = TimeUtils::sampleMicro(); + _boundManager.storeLocalBounds(); + if ( _produceUNSATProofs ) + _groundBoundManager.storeLocalBounds(); + struct timespec end = TimeUtils::sampleMicro(); + + _statistics.incLongAttribute( Statistics::TIME_CONTEXT_PUSH_HOOK, + TimeUtils::timePassed( start, end ) ); +} + +void Engine::postContextPopHook() +{ + struct timespec start = TimeUtils::sampleMicro(); + + _boundManager.restoreLocalBounds(); + if ( _produceUNSATProofs ) + _groundBoundManager.restoreLocalBounds(); + _tableau->postContextPopHook(); + + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TIME_CONTEXT_POP_HOOK, + TimeUtils::timePassed( start, end ) ); +} + +void Engine::reset() +{ + resetStatistics(); + _sncMode = false; + clearViolatedPLConstraints(); + resetSmtCore(); + resetBoundTighteners(); + resetExitCode(); +} + +void Engine::resetStatistics() +{ + Statistics statistics; + _statistics = statistics; + _smtCore.setStatistics( &_statistics ); + _tableau->setStatistics( &_statistics ); + _rowBoundTightener->setStatistics( &_statistics ); + _preprocessor.setStatistics( &_statistics ); + _activeEntryStrategy->setStatistics( &_statistics ); + + _statistics.stampStartingTime(); +} + +void Engine::clearViolatedPLConstraints() +{ + _violatedPlConstraints.clear(); + _plConstraintToFix = NULL; +} + +void Engine::resetSmtCore() +{ + _smtCore.reset(); + _smtCore.initializeScoreTrackerIfNeeded( _plConstraints ); +} + +void Engine::resetExitCode() +{ + _exitCode = Engine::NOT_DONE; +} + +void Engine::resetBoundTighteners() +{ +} + +void Engine::warmStart() +{ + // An NLR is required for a warm start + if ( !_networkLevelReasoner ) + return; + + // First, choose an arbitrary 
assignment for the input variables + unsigned numInputVariables = _preprocessedQuery->getNumInputVariables(); + unsigned numOutputVariables = _preprocessedQuery->getNumOutputVariables(); + + if ( numInputVariables == 0 ) + { + // Trivial case: all inputs are fixed, nothing to evaluate + return; + } + + double *inputAssignment = new double[numInputVariables]; + double *outputAssignment = new double[numOutputVariables]; + + for ( unsigned i = 0; i < numInputVariables; ++i ) + { + unsigned variable = _preprocessedQuery->inputVariableByIndex( i ); + inputAssignment[i] = _tableau->getLowerBound( variable ); + } + + // Evaluate the network for this assignment + _networkLevelReasoner->evaluate( inputAssignment, outputAssignment ); + + // Try to update as many variables as possible to match their assignment + for ( unsigned i = 0; i < _networkLevelReasoner->getNumberOfLayers(); ++i ) + { + const NLR::Layer *layer = _networkLevelReasoner->getLayer( i ); + unsigned layerSize = layer->getSize(); + const double *assignment = layer->getAssignment(); + + for ( unsigned j = 0; j < layerSize; ++j ) + { + if ( layer->neuronHasVariable( j ) ) + { + unsigned variable = layer->neuronToVariable( j ); + if ( !_tableau->isBasic( variable ) ) + _tableau->setNonBasicAssignment( variable, assignment[j], false ); + } + } + } + + // We did what we could for the non-basics; now let the tableau compute + // the basic assignment + _tableau->computeAssignment(); + + delete[] outputAssignment; + delete[] inputAssignment; +} + +void Engine::checkOverallProgress() +{ + // Get fresh statistics + unsigned numVisitedStates = + _statistics.getUnsignedAttribute( Statistics::NUM_VISITED_TREE_STATES ); + unsigned long long currentIteration = + _statistics.getLongAttribute( Statistics::NUM_MAIN_LOOP_ITERATIONS ); + + if ( numVisitedStates > _lastNumVisitedStates ) + { + // Progress has been made + _lastNumVisitedStates = numVisitedStates; + _lastIterationWithProgress = currentIteration; + } + else + { + // 
No progress has been made. If it's been too long, request a restoration + if ( currentIteration > + _lastIterationWithProgress + GlobalConfiguration::MAX_ITERATIONS_WITHOUT_PROGRESS ) + { + ENGINE_LOG( + "checkOverallProgress detected cycling. Requesting a precision restoration" ); + _basisRestorationRequired = Engine::STRONG_RESTORATION_NEEDED; + _lastIterationWithProgress = currentIteration; + } + } +} + +void Engine::updateDirections() +{ + if ( GlobalConfiguration::USE_POLARITY_BASED_DIRECTION_HEURISTICS ) + for ( const auto &constraint : _plConstraints ) + if ( constraint->supportPolarity() && constraint->isActive() && + !constraint->phaseFixed() ) + constraint->updateDirection(); +} + +void Engine::decideBranchingHeuristics() +{ + DivideStrategy divideStrategy = Options::get()->getDivideStrategy(); + if ( divideStrategy == DivideStrategy::Auto ) + { + if ( !_produceUNSATProofs && !_preprocessedQuery->getInputVariables().empty() && + _preprocessedQuery->getInputVariables().size() < + GlobalConfiguration::INTERVAL_SPLITTING_THRESHOLD ) + { + // NOTE: the benefit of input splitting is minimal with abstract intepretation disabled. + // Therefore, since the proof production mode does not currently support that, we do + // not perform input-splitting in proof production mode. 
+ divideStrategy = DivideStrategy::LargestInterval; + if ( _verbosity >= 2 ) + printf( "Branching heuristics set to LargestInterval\n" ); + } + else + { + if ( GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH ) + { + divideStrategy = DivideStrategy::PseudoImpact; + if ( _verbosity >= 2 ) + printf( "Branching heuristics set to PseudoImpact\n" ); + } + else + { + divideStrategy = DivideStrategy::ReLUViolation; + if ( _verbosity >= 2 ) + printf( "Branching heuristics set to ReLUViolation\n" ); + } + } + } + ASSERT( divideStrategy != DivideStrategy::Auto ); + _smtCore.setBranchingHeuristics( divideStrategy ); + _smtCore.initializeScoreTrackerIfNeeded( _plConstraints ); +} + +PiecewiseLinearConstraint *Engine::pickSplitPLConstraintBasedOnPolarity() +{ + ENGINE_LOG( Stringf( "Using Polarity-based heuristics..." ).ascii() ); + + if ( !_networkLevelReasoner ) + return NULL; + + List constraints = + _networkLevelReasoner->getConstraintsInTopologicalOrder(); + + Map scoreToConstraint; + for ( auto &plConstraint : constraints ) + { + if ( plConstraint->supportPolarity() && plConstraint->isActive() && + !plConstraint->phaseFixed() ) + { + plConstraint->updateScoreBasedOnPolarity(); + scoreToConstraint[plConstraint->getScore()] = plConstraint; + if ( scoreToConstraint.size() >= GlobalConfiguration::POLARITY_CANDIDATES_THRESHOLD ) + break; + } + } + if ( scoreToConstraint.size() > 0 ) + { + ENGINE_LOG( Stringf( "Score of the picked ReLU: %f", ( *scoreToConstraint.begin() ).first ) + .ascii() ); + return ( *scoreToConstraint.begin() ).second; + } + else + return NULL; +} + +PiecewiseLinearConstraint *Engine::pickSplitPLConstraintBasedOnTopology() +{ + // We push the first unfixed ReLU in the topology order to the _candidatePlConstraints + ENGINE_LOG( Stringf( "Using EarliestReLU heuristics..." 
).ascii() ); + + if ( !_networkLevelReasoner ) + throw MarabouError( MarabouError::NETWORK_LEVEL_REASONER_NOT_AVAILABLE ); + + List constraints = + _networkLevelReasoner->getConstraintsInTopologicalOrder(); + + for ( auto &plConstraint : constraints ) + { + if ( plConstraint->isActive() && !plConstraint->phaseFixed() ) + return plConstraint; + } + return NULL; +} + +PiecewiseLinearConstraint *Engine::pickSplitPLConstraintBasedOnIntervalWidth() +{ + // We push the first unfixed ReLU in the topology order to the _candidatePlConstraints + ENGINE_LOG( Stringf( "Using LargestInterval heuristics..." ).ascii() ); + + unsigned inputVariableWithLargestInterval = 0; + double largestIntervalSoFar = 0; + for ( const auto &variable : _preprocessedQuery->getInputVariables() ) + { + double interval = _tableau->getUpperBound( variable ) - _tableau->getLowerBound( variable ); + if ( interval > largestIntervalSoFar ) + { + inputVariableWithLargestInterval = variable; + largestIntervalSoFar = interval; + } + } + + if ( largestIntervalSoFar == 0 ) + return NULL; + else + { + double mid = ( _tableau->getLowerBound( inputVariableWithLargestInterval ) + + _tableau->getUpperBound( inputVariableWithLargestInterval ) ) / + 2; + PiecewiseLinearCaseSplit s1; + s1.storeBoundTightening( + Tightening( inputVariableWithLargestInterval, mid, Tightening::UB ) ); + PiecewiseLinearCaseSplit s2; + s2.storeBoundTightening( + Tightening( inputVariableWithLargestInterval, mid, Tightening::LB ) ); + + List splits; + splits.append( s1 ); + splits.append( s2 ); + _disjunctionForSplitting = + std::unique_ptr( new DisjunctionConstraint( splits ) ); + return _disjunctionForSplitting.get(); + } +} + +PiecewiseLinearConstraint *Engine::pickSplitPLConstraint( DivideStrategy strategy ) +{ + ENGINE_LOG( Stringf( "Picking a split PLConstraint..." 
).ascii() ); + + PiecewiseLinearConstraint *candidatePLConstraint = NULL; + if ( strategy == DivideStrategy::PseudoImpact ) + { + if ( _smtCore.getStackDepth() > 3 ) + candidatePLConstraint = _smtCore.getConstraintsWithHighestScore(); + else if ( !_preprocessedQuery->getInputVariables().empty() && + _preprocessedQuery->getInputVariables().size() < + GlobalConfiguration::INTERVAL_SPLITTING_THRESHOLD ) + candidatePLConstraint = pickSplitPLConstraintBasedOnIntervalWidth(); + else + { + candidatePLConstraint = pickSplitPLConstraintBasedOnPolarity(); + if ( candidatePLConstraint == NULL ) + candidatePLConstraint = _smtCore.getConstraintsWithHighestScore(); + } + } + else if ( strategy == DivideStrategy::Polarity ) + candidatePLConstraint = pickSplitPLConstraintBasedOnPolarity(); + else if ( strategy == DivideStrategy::EarliestReLU ) + candidatePLConstraint = pickSplitPLConstraintBasedOnTopology(); + else if ( strategy == DivideStrategy::LargestInterval && + ( ( _smtCore.getStackDepth() + 1 ) % + GlobalConfiguration::INTERVAL_SPLITTING_FREQUENCY != + 0 ) ) + { + // Conduct interval splitting periodically. + candidatePLConstraint = pickSplitPLConstraintBasedOnIntervalWidth(); + } + ENGINE_LOG( + Stringf( ( candidatePLConstraint ? "Picked..." + : "Unable to pick using the current strategy..." ) ) + .ascii() ); + return candidatePLConstraint; +} + +PiecewiseLinearConstraint *Engine::pickSplitPLConstraintSnC( SnCDivideStrategy strategy ) +{ + PiecewiseLinearConstraint *candidatePLConstraint = NULL; + if ( strategy == SnCDivideStrategy::Polarity ) + candidatePLConstraint = pickSplitPLConstraintBasedOnPolarity(); + else if ( strategy == SnCDivideStrategy::EarliestReLU ) + candidatePLConstraint = pickSplitPLConstraintBasedOnTopology(); + + ENGINE_LOG( Stringf( "Done updating scores..." ).ascii() ); + ENGINE_LOG( + Stringf( ( candidatePLConstraint ? "Picked..." + : "Unable to pick using the current strategy..." 
) ) + .ascii() ); + return candidatePLConstraint; +} + +bool Engine::restoreSmtState( SmtState &smtState ) +{ + try + { + ASSERT( _smtCore.getStackDepth() == 0 ); + + // Step 1: all implied valid splits at root + for ( auto &validSplit : smtState._impliedValidSplitsAtRoot ) + { + applySplit( validSplit ); + _smtCore.recordImpliedValidSplit( validSplit ); + } + + tightenBoundsOnConstraintMatrix(); + _boundManager.propagateTightenings(); + // For debugging purposes + checkBoundCompliancyWithDebugSolution(); + do + performSymbolicBoundTightening(); + while ( applyAllValidConstraintCaseSplits() ); + + // Step 2: replay the stack + for ( auto &stackEntry : smtState._stack ) + { + _smtCore.replaySmtStackEntry( stackEntry ); + // Do all the bound propagation, and set ReLU constraints to inactive (at + // least the one corresponding to the _activeSplit applied above. + tightenBoundsOnConstraintMatrix(); + + // For debugging purposes + checkBoundCompliancyWithDebugSolution(); + do + performSymbolicBoundTightening(); + while ( applyAllValidConstraintCaseSplits() ); + } + _boundManager.propagateTightenings(); + } + catch ( const InfeasibleQueryException & ) + { + // The current query is unsat, and we need to pop. + // If we're at level 0, the whole query is unsat. 
+ if ( _produceUNSATProofs ) + explainSimplexFailure(); + + if ( !_smtCore.popSplit() ) + { + if ( _verbosity > 0 ) + { + printf( "\nEngine::solve: UNSAT query\n" ); + _statistics.print(); + } + _exitCode = Engine::UNSAT; + for ( PiecewiseLinearConstraint *p : _plConstraints ) + p->setActiveConstraint( true ); + return false; + } + } + return true; +} + +void Engine::storeSmtState( SmtState &smtState ) +{ + _smtCore.storeSmtState( smtState ); +} + +bool Engine::solveWithMILPEncoding( double timeoutInSeconds ) +{ + try + { + if ( _lpSolverType == LPSolverType::NATIVE && _tableau->basisMatrixAvailable() ) + { + explicitBasisBoundTightening(); + applyAllBoundTightenings(); + applyAllValidConstraintCaseSplits(); + } + + while ( applyAllValidConstraintCaseSplits() ) + { + performSymbolicBoundTightening(); + } + } + catch ( const InfeasibleQueryException & ) + { + _exitCode = Engine::UNSAT; + return false; + } + + ENGINE_LOG( "Encoding the input query with Gurobi...\n" ); + _gurobi = std::unique_ptr( new GurobiWrapper() ); + _tableau->setGurobi( &( *_gurobi ) ); + _milpEncoder = std::unique_ptr( new MILPEncoder( *_tableau ) ); + _milpEncoder->encodeQuery( *_gurobi, *_preprocessedQuery ); + ENGINE_LOG( "Query encoded in Gurobi...\n" ); + + double timeoutForGurobi = ( timeoutInSeconds == 0 ? 
FloatUtils::infinity() : timeoutInSeconds ); + ENGINE_LOG( Stringf( "Gurobi timeout set to %f\n", timeoutForGurobi ).ascii() ) + _gurobi->setTimeLimit( timeoutForGurobi ); + if ( !_sncMode ) + _gurobi->setNumberOfThreads( Options::get()->getInt( Options::NUM_WORKERS ) ); + _gurobi->setVerbosity( _verbosity > 0 ); + _gurobi->solve(); + + if ( _gurobi->haveFeasibleSolution() ) + { + if ( allNonlinearConstraintsHold() ) + { + _exitCode = IEngine::SAT; + return true; + } + else + { + _exitCode = IEngine::UNKNOWN; + return false; + } + } + else if ( _gurobi->infeasible() ) + _exitCode = IEngine::UNSAT; + else if ( _gurobi->timeout() ) + _exitCode = IEngine::TIMEOUT; + else + throw NLRError( NLRError::UNEXPECTED_RETURN_STATUS_FROM_GUROBI ); + return false; +} + +bool Engine::preprocessingEnabled() const +{ + return _preprocessingEnabled; +} + +Preprocessor *Engine::getPreprocessor() +{ + return &_preprocessor; +} + +bool Engine::performDeepSoILocalSearch() +{ + ENGINE_LOG( "Performing local search..." ); + struct timespec start = TimeUtils::sampleMicro(); + ASSERT( allVarsWithinBounds() ); + + // All the linear constraints have been satisfied at this point. + // Update the cost function + _soiManager->initializePhasePattern(); + + LinearExpression initialPhasePattern = _soiManager->getCurrentSoIPhasePattern(); + + if ( initialPhasePattern.isZero() ) + { + if ( hasBranchingCandidate() ) + while ( !_smtCore.needToSplit() ) + _smtCore.reportRejectedPhasePatternProposal(); + return false; + } + + minimizeHeuristicCost( initialPhasePattern ); + ASSERT( allVarsWithinBounds() ); + _soiManager->updateCurrentPhasePatternForSatisfiedPLConstraints(); + // Always accept the first phase pattern. 
+ _soiManager->acceptCurrentPhasePattern(); + double costOfLastAcceptedPhasePattern = + computeHeuristicCost( _soiManager->getCurrentSoIPhasePattern() ); + + double costOfProposedPhasePattern = FloatUtils::infinity(); + bool lastProposalAccepted = true; + while ( !_smtCore.needToSplit() ) + { + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TOTAL_TIME_LOCAL_SEARCH_MICRO, + TimeUtils::timePassed( start, end ) ); + start = end; + + if ( lastProposalAccepted ) + { + /* + Check whether the optimal solution to the last accepted phase + is a real solution. We only check this when the last proposal + was accepted, because rejected phase pattern must have resulted in + increase in the SoI cost. + + HW: Another option is to only do this check when + costOfLastAcceptedPhasePattern is 0, but this might be too strict. + The overhead is low anyway. + */ + collectViolatedPlConstraints(); + if ( allPlConstraintsHold() ) + { + if ( _lpSolverType == LPSolverType::NATIVE && + _tableau->getBasicAssignmentStatus() != + ITableau::BASIC_ASSIGNMENT_JUST_COMPUTED ) + { + if ( _verbosity > 0 ) + { + printf( "Before declaring sat, recomputing...\n" ); + } + // Make sure that the assignment is precise before declaring success + _tableau->computeAssignment(); + // If we actually have a real satisfying assignment, + return false; + } + else + { + ENGINE_LOG( "Performing local search - done." ); + return true; + } + } + else if ( FloatUtils::isZero( costOfLastAcceptedPhasePattern ) ) + { + // Corner case: the SoI is minimal but there are still some PL + // constraints (those not in the SoI) unsatisfied. + // In this case, we bump up the score of PLConstraints not in + // the SoI with the hope to branch on them early. 
+ bumpUpPseudoImpactOfPLConstraintsNotInSoI(); + if ( hasBranchingCandidate() ) + while ( !_smtCore.needToSplit() ) + _smtCore.reportRejectedPhasePatternProposal(); + return false; + } + } + + // No satisfying assignment found for the last accepted phase pattern, + // propose an update to it. + _soiManager->proposePhasePatternUpdate(); + minimizeHeuristicCost( _soiManager->getCurrentSoIPhasePattern() ); + _soiManager->updateCurrentPhasePatternForSatisfiedPLConstraints(); + costOfProposedPhasePattern = + computeHeuristicCost( _soiManager->getCurrentSoIPhasePattern() ); + + // We have the "local" effect of change the cost term of some + // PLConstraints in the phase pattern. Use this information to influence + // the branching decision. + updatePseudoImpactWithSoICosts( costOfLastAcceptedPhasePattern, + costOfProposedPhasePattern ); + + // Decide whether to accept the last proposal. + if ( _soiManager->decideToAcceptCurrentProposal( costOfLastAcceptedPhasePattern, + costOfProposedPhasePattern ) ) + { + _soiManager->acceptCurrentPhasePattern(); + costOfLastAcceptedPhasePattern = costOfProposedPhasePattern; + lastProposalAccepted = true; + } + else + { + _smtCore.reportRejectedPhasePatternProposal(); + lastProposalAccepted = false; + } + } + + ENGINE_LOG( "Performing local search - done" ); + return false; +} + +void Engine::minimizeHeuristicCost( const LinearExpression &heuristicCost ) +{ + ENGINE_LOG( "Optimizing w.r.t. the current heuristic cost..." 
); + + if ( _lpSolverType == LPSolverType::GUROBI ) + { + minimizeCostWithGurobi( heuristicCost ); + + ENGINE_LOG( + Stringf( "Current heuristic cost: %f", _gurobi->getOptimalCostOrObjective() ).ascii() ); + } + else + { + _tableau->toggleOptimization( true ); + + _heuristicCost = heuristicCost; + + bool localOptimumReached = false; + while ( !localOptimumReached ) + { + DEBUG( _tableau->verifyInvariants() ); + + mainLoopStatistics(); + if ( _verbosity > 1 && + _statistics.getLongAttribute( Statistics::NUM_MAIN_LOOP_ITERATIONS ) % + _statisticsPrintingFrequency == + 0 ) + _statistics.print(); + + if ( !allVarsWithinBounds() ) + throw VariableOutOfBoundDuringOptimizationException(); + + if ( performPrecisionRestorationIfNeeded() ) + continue; + + ASSERT( allVarsWithinBounds() ); + + localOptimumReached = performSimplexStep(); + } + _tableau->toggleOptimization( false ); + ENGINE_LOG( Stringf( "Current heuristic cost: %f", computeHeuristicCost( heuristicCost ) ) + .ascii() ); + } + + ENGINE_LOG( "Optimizing w.r.t. the current heuristic cost - done\n" ); +} + +double Engine::computeHeuristicCost( const LinearExpression &heuristicCost ) +{ + return ( _costFunctionManager->computeGivenCostFunctionDirectly( heuristicCost._addends ) + + heuristicCost._constant ); +} + +void Engine::updatePseudoImpactWithSoICosts( double costOfLastAcceptedPhasePattern, + double costOfProposedPhasePattern ) +{ + ASSERT( _soiManager ); + + const List &constraintsUpdated = + _soiManager->getConstraintsUpdatedInLastProposal(); + // Score is divided by the number of updated constraints in the last + // proposal. In the Sum of Infeasibilities paper, only one constraint + // is updated each time. But we might consider alternative proposal + // strategy in the future. + double score = ( fabs( costOfLastAcceptedPhasePattern - costOfProposedPhasePattern ) / + constraintsUpdated.size() ); + + ASSERT( constraintsUpdated.size() > 0 ); + // Update the Pseudo-Impact estimation. 
+ for ( const auto &constraint : constraintsUpdated ) + _smtCore.updatePLConstraintScore( constraint, score ); +} + +void Engine::bumpUpPseudoImpactOfPLConstraintsNotInSoI() +{ + ASSERT( _soiManager ); + for ( const auto &plConstraint : _plConstraints ) + { + if ( plConstraint->isActive() && !plConstraint->supportSoI() && + !plConstraint->phaseFixed() && !plConstraint->satisfied() ) + _smtCore.updatePLConstraintScore( + plConstraint, GlobalConfiguration::SCORE_BUMP_FOR_PL_CONSTRAINTS_NOT_IN_SOI ); + } +} + +void Engine::informLPSolverOfBounds() +{ + if ( _lpSolverType == LPSolverType::GUROBI ) + { + struct timespec start = TimeUtils::sampleMicro(); + for ( unsigned i = 0; i < _preprocessedQuery->getNumberOfVariables(); ++i ) + { + String variableName = _milpEncoder->getVariableNameFromVariable( i ); + _gurobi->setLowerBound( variableName, _tableau->getLowerBound( i ) ); + _gurobi->setUpperBound( variableName, _tableau->getUpperBound( i ) ); + } + _gurobi->updateModel(); + struct timespec end = TimeUtils::sampleMicro(); + _statistics.incLongAttribute( Statistics::TIME_ADDING_CONSTRAINTS_TO_MILP_SOLVER_MICRO, + TimeUtils::timePassed( start, end ) ); + } + else + { + // Bounds are already up-to-date in Tableau when using native Simplex. 
+ return; + } +} + +bool Engine::minimizeCostWithGurobi( const LinearExpression &costFunction ) +{ + ASSERT( _gurobi && _milpEncoder ); + + struct timespec simplexStart = TimeUtils::sampleMicro(); + + _milpEncoder->encodeCostFunction( *_gurobi, costFunction ); + _gurobi->setTimeLimit( FloatUtils::infinity() ); + _gurobi->solve(); + + struct timespec simplexEnd = TimeUtils::sampleMicro(); + + _statistics.incLongAttribute( Statistics::TIME_SIMPLEX_STEPS_MICRO, + TimeUtils::timePassed( simplexStart, simplexEnd ) ); + _statistics.incLongAttribute( Statistics::NUM_SIMPLEX_STEPS, + _gurobi->getNumberOfSimplexIterations() ); + + if ( _gurobi->infeasible() ) + throw InfeasibleQueryException(); + else if ( _gurobi->optimal() ) + return true; + else + throw CommonError( CommonError::UNEXPECTED_GUROBI_STATUS, + Stringf( "Current status: %u", _gurobi->getStatusCode() ).ascii() ); + + return false; +} + +void Engine::checkGurobiBoundConsistency() const +{ + if ( _gurobi && _milpEncoder ) + { + for ( unsigned i = 0; i < _preprocessedQuery->getNumberOfVariables(); ++i ) + { + String iName = _milpEncoder->getVariableNameFromVariable( i ); + double gurobiLowerBound = _gurobi->getLowerBound( iName ); + double lowerBound = _tableau->getLowerBound( i ); + if ( !FloatUtils::areEqual( gurobiLowerBound, lowerBound ) ) + { + throw MarabouError( MarabouError::BOUNDS_NOT_UP_TO_DATE_IN_LP_SOLVER, + Stringf( "x%u lower bound inconsistent!" + " Gurobi: %f, Tableau: %f", + i, + gurobiLowerBound, + lowerBound ) + .ascii() ); + } + double gurobiUpperBound = _gurobi->getUpperBound( iName ); + double upperBound = _tableau->getUpperBound( i ); + + if ( !FloatUtils::areEqual( gurobiUpperBound, upperBound ) ) + { + throw MarabouError( MarabouError::BOUNDS_NOT_UP_TO_DATE_IN_LP_SOLVER, + Stringf( "x%u upper bound inconsistent!" 
+ " Gurobi: %f, Tableau: %f", + i, + gurobiUpperBound, + upperBound ) + .ascii() ); + } + } + } +} + +bool Engine::consistentBounds() const +{ + return _boundManager.consistentBounds(); +} + +Query Engine::buildQueryFromCurrentState() const +{ + Query query = *_preprocessedQuery; + for ( unsigned i = 0; i < query.getNumberOfVariables(); ++i ) + { + query.setLowerBound( i, _tableau->getLowerBound( i ) ); + query.setUpperBound( i, _tableau->getUpperBound( i ) ); + } + return query; +} + +void Engine::updateGroundUpperBound( const unsigned var, const double value ) +{ + ASSERT( var < _tableau->getN() && _produceUNSATProofs ); + if ( FloatUtils::lt( value, _groundBoundManager.getUpperBound( var ) ) ) + _groundBoundManager.setUpperBound( var, value ); +} + +void Engine::updateGroundLowerBound( const unsigned var, const double value ) +{ + ASSERT( var < _tableau->getN() && _produceUNSATProofs ); + if ( FloatUtils::gt( value, _groundBoundManager.getLowerBound( var ) ) ) + _groundBoundManager.setLowerBound( var, value ); +} + +double Engine::getGroundBound( unsigned var, bool isUpper ) const +{ + ASSERT( var < _tableau->getN() && _produceUNSATProofs ); + return isUpper ? 
_groundBoundManager.getUpperBound( var ) + : _groundBoundManager.getLowerBound( var ); +} + +bool Engine::shouldProduceProofs() const +{ + return _produceUNSATProofs; +} + +void Engine::explainSimplexFailure() +{ + ASSERT( _produceUNSATProofs ); + + DEBUG( checkGroundBounds() ); + + unsigned infeasibleVar = _boundManager.getInconsistentVariable(); + + if ( infeasibleVar == IBoundManager::NO_VARIABLE_FOUND || + !certifyInfeasibility( infeasibleVar ) ) + infeasibleVar = explainFailureWithTableau(); + + if ( infeasibleVar == IBoundManager::NO_VARIABLE_FOUND ) + infeasibleVar = explainFailureWithCostFunction(); + + if ( infeasibleVar == IBoundManager::NO_VARIABLE_FOUND ) + { + _costFunctionManager->computeCoreCostFunction(); + infeasibleVar = explainFailureWithCostFunction(); + } + + if ( infeasibleVar == IBoundManager::NO_VARIABLE_FOUND ) + { + markLeafToDelegate(); + return; + } + + ASSERT( infeasibleVar < _tableau->getN() ); + ASSERT( _UNSATCertificateCurrentPointer && + !( **_UNSATCertificateCurrentPointer ).getContradiction() ); + _statistics.incUnsignedAttribute( Statistics::NUM_CERTIFIED_LEAVES ); + + writeContradictionToCertificate( infeasibleVar ); + + ( **_UNSATCertificateCurrentPointer ).makeLeaf(); +} + +bool Engine::certifyInfeasibility( unsigned var ) const +{ + ASSERT( _produceUNSATProofs ); + + Vector contradiction = computeContradiction( var ); + + if ( contradiction.empty() ) + return FloatUtils::isNegative( _groundBoundManager.getUpperBound( var ) - + _groundBoundManager.getLowerBound( var ) ); + + SparseUnsortedList sparseContradiction = SparseUnsortedList(); + + contradiction.empty() + ? 
sparseContradiction.initializeToEmpty() + : sparseContradiction.initialize( contradiction.data(), contradiction.size() ); + + // In case contradiction is a vector of zeros + if ( sparseContradiction.empty() ) + return FloatUtils::isNegative( _groundBoundManager.getUpperBound( var ) - + _groundBoundManager.getLowerBound( var ) ); + + double derivedBound = + UNSATCertificateUtils::computeCombinationUpperBound( sparseContradiction, + _tableau->getSparseA(), + _groundBoundManager.getUpperBounds(), + _groundBoundManager.getLowerBounds(), + _tableau->getN() ); + return FloatUtils::isNegative( derivedBound ); +} + +double Engine::explainBound( unsigned var, bool isUpper ) const +{ + ASSERT( _produceUNSATProofs ); + + SparseUnsortedList explanation( 0 ); + + if ( !_boundManager.isExplanationTrivial( var, isUpper ) ) + explanation = _boundManager.getExplanation( var, isUpper ); + + if ( explanation.empty() ) + return isUpper ? _groundBoundManager.getUpperBound( var ) + : _groundBoundManager.getLowerBound( var ); + + return UNSATCertificateUtils::computeBound( var, + isUpper, + explanation, + _tableau->getSparseA(), + _groundBoundManager.getUpperBounds(), + _groundBoundManager.getLowerBounds(), + _tableau->getN() ); +} + +bool Engine::validateBounds( unsigned var, double epsilon, bool isUpper ) const +{ + ASSERT( _produceUNSATProofs ); + + double explained, real; + explained = explainBound( var, isUpper ); + if ( isUpper ) + { + real = _boundManager.getUpperBound( var ); + if ( explained - real > epsilon ) + { + ENGINE_LOG( "Var %d. Computed Upper %.5lf, real %.5lf. Difference is %.10lf\n", + var, + explained, + real, + abs( explained - real ) ); + return false; + } + } + else + { + real = _boundManager.getLowerBound( var ); + if ( explained - real < -epsilon ) + { + ENGINE_LOG( "Var %d. Computed Lower %.5lf, real %.5lf. 
Difference is %.10lf\n", + var, + explained, + real, + abs( explained - real ) ); + return false; + } + } + return true; +} + +bool Engine::validateAllBounds( double epsilon ) const +{ + ASSERT( _produceUNSATProofs ); + + bool res = true; + + for ( unsigned var = 0; var < _tableau->getN(); ++var ) + if ( !validateBounds( var, epsilon, Tightening::UB ) || + !validateBounds( var, epsilon, Tightening::LB ) ) + res = false; + + return res; +} + +bool Engine::checkGroundBounds() const +{ + ASSERT( _produceUNSATProofs ); + + for ( unsigned i = 0; i < _tableau->getN(); ++i ) + { + if ( FloatUtils::gt( _groundBoundManager.getLowerBound( i ), + _boundManager.getLowerBound( i ) ) || + FloatUtils::lt( _groundBoundManager.getUpperBound( i ), + _boundManager.getUpperBound( i ) ) ) + return false; + } + return true; +} + +unsigned Engine::explainFailureWithTableau() +{ + ASSERT( _produceUNSATProofs ); + + // Failure of a simplex step implies infeasible bounds imposed by the row + TableauRow boundUpdateRow = TableauRow( _tableau->getN() ); + + // For every basic, check that is has no slack and its explanations indeed prove a + // contradiction + unsigned basicVar; + + for ( unsigned i = 0; i < _tableau->getM(); ++i ) + { + if ( _tableau->basicOutOfBounds( i ) ) + { + _tableau->getTableauRow( i, &boundUpdateRow ); + basicVar = boundUpdateRow._lhs; + + if ( FloatUtils::gt( _boundManager.computeRowBound( boundUpdateRow, Tightening::LB ), + _boundManager.getUpperBound( basicVar ) ) && + explainAndCheckContradiction( basicVar, Tightening::LB, &boundUpdateRow ) ) + return basicVar; + + if ( FloatUtils::lt( _boundManager.computeRowBound( boundUpdateRow, Tightening::UB ), + _boundManager.getLowerBound( basicVar ) ) && + explainAndCheckContradiction( basicVar, Tightening::UB, &boundUpdateRow ) ) + return basicVar; + } + } + + return IBoundManager::NO_VARIABLE_FOUND; +} + +unsigned Engine::explainFailureWithCostFunction() +{ + ASSERT( _produceUNSATProofs ); + + // Failure of a simplex step 
might imply infeasible bounds imposed by the cost function + unsigned curBasicVar; + unsigned infVar = IBoundManager::NO_VARIABLE_FOUND; + double curCost; + bool curUpper; + const SparseUnsortedList *costRow = _costFunctionManager->createRowOfCostFunction(); + + for ( unsigned i = 0; i < _tableau->getM(); ++i ) + { + curBasicVar = _tableau->basicIndexToVariable( i ); + curCost = _costFunctionManager->getBasicCost( i ); + + if ( FloatUtils::isZero( curCost ) ) + continue; + + curUpper = ( curCost < 0 ); + + // Check the basic variable has no slack + if ( !( !curUpper && FloatUtils::gt( _boundManager.computeSparseRowBound( + *costRow, Tightening::LB, curBasicVar ), + _boundManager.getUpperBound( curBasicVar ) ) ) && + !( curUpper && FloatUtils::lt( _boundManager.computeSparseRowBound( + *costRow, Tightening::UB, curBasicVar ), + _boundManager.getLowerBound( curBasicVar ) ) ) ) + + continue; + + // Check the explanation indeed proves a contradiction + if ( explainAndCheckContradiction( curBasicVar, curUpper, costRow ) ) + { + infVar = curBasicVar; + break; + } + } + + delete costRow; + return infVar; +} + +bool Engine::explainAndCheckContradiction( unsigned var, bool isUpper, const TableauRow *row ) +{ + ASSERT( _produceUNSATProofs ); + + SparseUnsortedList backup( 0 ); + backup = _boundManager.getExplanation( var, isUpper ); + + _boundManager.updateBoundExplanation( *row, isUpper, var ); + + // Ensure the proof is correct + if ( certifyInfeasibility( var ) ) + return true; + + // If not, restores previous certificate if the proof is wrong + _boundManager.setExplanation( backup, var, isUpper ); + + return false; +} + +bool Engine::explainAndCheckContradiction( unsigned var, + bool isUpper, + const SparseUnsortedList *row ) +{ + ASSERT( _produceUNSATProofs ); + + SparseUnsortedList backup( 0 ); + backup = _boundManager.getExplanation( var, isUpper ); + + _boundManager.updateBoundExplanationSparse( *row, isUpper, var ); + + // Ensure the proof is correct + if ( 
certifyInfeasibility( var ) ) + return true; + + // If not, restores previous certificate if the proof is wrong + _boundManager.setExplanation( backup, var, isUpper ); + + return false; +} + +UnsatCertificateNode *Engine::getUNSATCertificateCurrentPointer() const +{ + return _UNSATCertificateCurrentPointer->get(); +} + +void Engine::setUNSATCertificateCurrentPointer( UnsatCertificateNode *node ) +{ + _UNSATCertificateCurrentPointer->set( node ); +} + +const UnsatCertificateNode *Engine::getUNSATCertificateRoot() const +{ + return _UNSATCertificate; +} + +bool Engine::certifyUNSATCertificate() +{ + ASSERT( _produceUNSATProofs && _UNSATCertificate && !_smtCore.getStackDepth() ); + + for ( auto &constraint : _plConstraints ) + { + if ( !UNSATCertificateUtils::getSupportedActivations().exists( constraint->getType() ) ) + { + String activationType = constraint->serializeToString().tokenize( "," ).back(); + printf( "Certification Error! Network contains activation function %s, that is not yet " + "supported by Marabou certification.\n", + activationType.ascii() ); + return false; + } + } + + struct timespec certificationStart = TimeUtils::sampleMicro(); + _precisionRestorer.restoreInitialEngineState( *this ); + + Vector groundUpperBounds( _tableau->getN(), 0 ); + Vector groundLowerBounds( _tableau->getN(), 0 ); + + for ( unsigned i = 0; i < _tableau->getN(); ++i ) + { + groundUpperBounds[i] = _groundBoundManager.getUpperBound( i ); + groundLowerBounds[i] = _groundBoundManager.getLowerBound( i ); + } + + if ( GlobalConfiguration::WRITE_JSON_PROOF ) + { + File file( JsonWriter::PROOF_FILENAME ); + JsonWriter::writeProofToJson( _UNSATCertificate, + _tableau->getM(), + _tableau->getSparseA(), + groundUpperBounds, + groundLowerBounds, + _plConstraints, + file ); + } + + Checker unsatCertificateChecker( _UNSATCertificate, + _tableau->getM(), + _tableau->getSparseA(), + groundUpperBounds, + groundLowerBounds, + _plConstraints ); + bool certificationSucceeded = 
unsatCertificateChecker.check(); + + _statistics.setLongAttribute( + Statistics::TOTAL_CERTIFICATION_TIME, + TimeUtils::timePassed( certificationStart, TimeUtils::sampleMicro() ) ); + printf( "Certification time: " ); + _statistics.printLongAttributeAsTime( + _statistics.getLongAttribute( Statistics::TOTAL_CERTIFICATION_TIME ) ); + + if ( certificationSucceeded ) + { + printf( "Certified\n" ); + _statistics.incUnsignedAttribute( Statistics::CERTIFIED_UNSAT ); + if ( _statistics.getUnsignedAttribute( Statistics::NUM_DELEGATED_LEAVES ) ) + printf( "Some leaves were delegated and need to be certified separately by an SMT " + "solver\n" ); + } + else + printf( "Error certifying UNSAT certificate\n" ); + + DEBUG( { + ASSERT( certificationSucceeded ); + if ( _statistics.getUnsignedAttribute( Statistics::NUM_POPS ) ) + { + double delegationRatio = + _statistics.getUnsignedAttribute( Statistics::NUM_DELEGATED_LEAVES ) / + _statistics.getUnsignedAttribute( Statistics::NUM_CERTIFIED_LEAVES ); + ASSERT( FloatUtils::lt( delegationRatio, 0.01 ) ); + } + } ); + + return certificationSucceeded; +} + +void Engine::markLeafToDelegate() +{ + ASSERT( _produceUNSATProofs ); + + // Mark leaf with toDelegate Flag + UnsatCertificateNode *currentUnsatCertificateNode = _UNSATCertificateCurrentPointer->get(); + ASSERT( _UNSATCertificateCurrentPointer && !currentUnsatCertificateNode->getContradiction() ); + currentUnsatCertificateNode->setDelegationStatus( DelegationStatus::DELEGATE_SAVE ); + currentUnsatCertificateNode->deletePLCExplanations(); + _statistics.incUnsignedAttribute( Statistics::NUM_DELEGATED_LEAVES ); + + if ( !currentUnsatCertificateNode->getChildren().empty() ) + currentUnsatCertificateNode->makeLeaf(); +} + +const Vector Engine::computeContradiction( unsigned infeasibleVar ) const +{ + ASSERT( _produceUNSATProofs ); + + unsigned m = _tableau->getM(); + SparseUnsortedList upperBoundExplanation( 0 ); + SparseUnsortedList lowerBoundExplanation( 0 ); + + if ( 
!_boundManager.isExplanationTrivial( infeasibleVar, Tightening::UB ) ) + upperBoundExplanation = _boundManager.getExplanation( infeasibleVar, Tightening::UB ); + + if ( !_boundManager.isExplanationTrivial( infeasibleVar, Tightening::LB ) ) + lowerBoundExplanation = _boundManager.getExplanation( infeasibleVar, Tightening::LB ); + + if ( upperBoundExplanation.empty() && lowerBoundExplanation.empty() ) + return Vector( 0 ); + + Vector contradiction = Vector( m, 0 ); + + if ( !upperBoundExplanation.empty() ) + for ( const auto &entry : upperBoundExplanation ) + contradiction[entry._index] = entry._value; + + if ( !lowerBoundExplanation.empty() ) + for ( const auto &entry : lowerBoundExplanation ) + contradiction[entry._index] -= entry._value; + + return contradiction; +} + +void Engine::writeContradictionToCertificate( unsigned infeasibleVar ) const +{ + ASSERT( _produceUNSATProofs ); + + Vector leafContradictionVec = computeContradiction( infeasibleVar ); + + Contradiction *leafContradiction = leafContradictionVec.empty() + ? new Contradiction( infeasibleVar ) + : new Contradiction( leafContradictionVec ); + ( **_UNSATCertificateCurrentPointer ).setContradiction( leafContradiction ); +} + +const BoundExplainer *Engine::getBoundExplainer() const +{ + return _boundManager.getBoundExplainer(); +} + +void Engine::setBoundExplainerContent( BoundExplainer *boundExplainer ) +{ + _boundManager.copyBoundExplainerContent( boundExplainer ); +} + +void Engine::propagateBoundManagerTightenings() +{ + _boundManager.propagateTightenings(); +} + +void Engine::extractBounds( IQuery &inputQuery ) +{ + for ( unsigned i = 0; i < inputQuery.getNumberOfVariables(); ++i ) + { + if ( _preprocessingEnabled ) + { + // Has the variable been merged into another? 
+ unsigned variable = i; + while ( _preprocessor.variableIsMerged( variable ) ) + variable = _preprocessor.getMergedIndex( variable ); + + // Symbolically fixed variables are ignored + if ( _preprocessor.variableIsUnusedAndSymbolicallyFixed( i ) ) + { + inputQuery.tightenLowerBound( i, FloatUtils::negativeInfinity() ); + inputQuery.tightenUpperBound( i, FloatUtils::infinity() ); + continue; + } + + // Fixed variables are easy: return the value they've been fixed to. + if ( _preprocessor.variableIsFixed( variable ) ) + { + inputQuery.tightenLowerBound( i, _preprocessor.getFixedValue( variable ) ); + inputQuery.tightenUpperBound( i, _preprocessor.getFixedValue( variable ) ); + continue; + } + + // We know which variable to look for, but it may have been assigned + // a new index, due to variable elimination + variable = _preprocessor.getNewIndex( variable ); + + inputQuery.tightenLowerBound( i, _preprocessedQuery->getLowerBound( variable ) ); + inputQuery.tightenUpperBound( i, _preprocessedQuery->getUpperBound( variable ) ); + } + else + { + inputQuery.tightenLowerBound( i, _preprocessedQuery->getLowerBound( i ) ); + inputQuery.tightenUpperBound( i, _preprocessedQuery->getUpperBound( i ) ); + } + } +} + +void Engine::addPLCLemma( std::shared_ptr &explanation ) +{ + if ( !_produceUNSATProofs ) + return; + + ASSERT( explanation && _UNSATCertificate && _UNSATCertificateCurrentPointer ) + _statistics.incUnsignedAttribute( Statistics::NUM_LEMMAS ); + _UNSATCertificateCurrentPointer->get()->addPLCLemma( explanation ); +} \ No newline at end of file diff --git a/src/nlr/GlobalConfiguration.cpp b/src/nlr/GlobalConfiguration.cpp new file mode 100644 index 0000000000..3a08b92234 --- /dev/null +++ b/src/nlr/GlobalConfiguration.cpp @@ -0,0 +1,238 @@ +/********************* */ +/*! \file GlobalConfiguration.cpp + ** \verbatim + ** Top contributors (to current version): + ** Guy Katz, Parth Shah, Derek Huang + ** This file is part of the Marabou project. 
+ ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS + ** in the top-level source directory) and their institutional affiliations. + ** All rights reserved. See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** [[ Add lengthier description here ]] + + **/ + +#include "GlobalConfiguration.h" + +#include "DivideStrategy.h" +#include "MString.h" + +#include + + +// The exponential moving average is calculated as +// ema = current * alpha + previous * (1 - alpha) +const double GlobalConfiguration::EXPONENTIAL_MOVING_AVERAGE_ALPHA = 0.5; + +// Whether to use SoI instead of Reluplex for local search for satisfying assignments +// to non-linear constraint. +bool GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH = true; + +const double GlobalConfiguration::SCORE_BUMP_FOR_PL_CONSTRAINTS_NOT_IN_SOI = 5; + +// Use the polarity metrics to decide which branch to take first in a case split +// and how to repair a ReLU constraint. +const bool GlobalConfiguration::USE_POLARITY_BASED_DIRECTION_HEURISTICS = true; + +const double GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS = 0.0000000001; +const unsigned GlobalConfiguration::DEFAULT_DOUBLE_TO_STRING_PRECISION = 10; +const unsigned GlobalConfiguration::STATISTICS_PRINTING_FREQUENCY = 10000; +const unsigned GlobalConfiguration::STATISTICS_PRINTING_FREQUENCY_GUROBI = 100; +const double GlobalConfiguration::BOUND_COMPARISON_ADDITIVE_TOLERANCE = 0.0000001; +const double GlobalConfiguration::BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE = 0.001 * 0.0000001; +const double GlobalConfiguration::PIVOT_CHANGE_COLUMN_TOLERANCE = 0.000000001; +const double GlobalConfiguration::PIVOT_ROW_AND_COLUMN_TOLERANCE = 0.01; +const double GlobalConfiguration::ENTRY_ELIGIBILITY_TOLERANCE = 0.00000001; +const double GlobalConfiguration::RATIO_CONSTRAINT_ADDITIVE_TOLERANCE = 0.0000001 * 0.3; +const double GlobalConfiguration::RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE = + 0.001 * 
0.0000001 * 0.3; +const double GlobalConfiguration::HARRIS_RATIO_CONSTRAINT_ADDITIVE_TOLERANCE = 0.0000001 * 0.5; +const double GlobalConfiguration::HARRIS_RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE = + 0.001 * 0.0000001 * 0.5; +const double GlobalConfiguration::BASIC_COSTS_ADDITIVE_TOLERANCE = 0.0000001; +const double GlobalConfiguration::BASIC_COSTS_MULTIPLICATIVE_TOLERANCE = 0.001 * 0.0000001; +const double GlobalConfiguration::SPARSE_FORREST_TOMLIN_DIAGONAL_ELEMENT_TOLERANCE = 0.00001; +const unsigned GlobalConfiguration::DEGRADATION_CHECKING_FREQUENCY = 100; +const double GlobalConfiguration::DEGRADATION_THRESHOLD = 0.1; +const double GlobalConfiguration::ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD = 0.0001; +const bool GlobalConfiguration::USE_COLUMN_MERGING_EQUATIONS = false; +const double GlobalConfiguration::GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD = 0.1; +const unsigned GlobalConfiguration::MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS = 5; +const DivideStrategy GlobalConfiguration::SPLITTING_HEURISTICS = DivideStrategy::ReLUViolation; +const unsigned GlobalConfiguration::INTERVAL_SPLITTING_FREQUENCY = 10; +const unsigned GlobalConfiguration::INTERVAL_SPLITTING_THRESHOLD = 10; +const unsigned GlobalConfiguration::BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY = 100; +const unsigned GlobalConfiguration::ROW_BOUND_TIGHTENER_SATURATION_ITERATIONS = 20; +const double GlobalConfiguration::COST_FUNCTION_ERROR_THRESHOLD = 0.0000000001; + +const unsigned GlobalConfiguration::SIMULATION_RANDOM_SEED = 1; +const unsigned GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED = 1; +const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED = 1; +const unsigned GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS = 1000; +const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS = 100; +const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE = 0.1; +const double 
GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE = 0.05; + +const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true; + +const double GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT = 0.00000000001; +const double GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT = 0.00000001; + +const double GlobalConfiguration::SIGMOID_CUTOFF_CONSTANT = 20; + +const bool GlobalConfiguration::PREPROCESS_INPUT_QUERY = true; +const bool GlobalConfiguration::PREPROCESSOR_ELIMINATE_VARIABLES = true; +const bool GlobalConfiguration::PL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING = true; +const bool GlobalConfiguration::NL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING = true; +const double GlobalConfiguration::PREPROCESSOR_ALMOST_FIXED_THRESHOLD = 0.00001; + +const unsigned GlobalConfiguration::PREPROCESSSING_MAX_TIGHTEING_ROUND = 10; + +const bool GlobalConfiguration::WARM_START = false; + +const unsigned GlobalConfiguration::MAX_ITERATIONS_WITHOUT_PROGRESS = 10000; + +const unsigned GlobalConfiguration::PSE_ITERATIONS_BEFORE_RESET = 1000; +const double GlobalConfiguration::PSE_GAMMA_ERROR_THRESHOLD = 0.001; +const double GlobalConfiguration::PSE_GAMMA_UPDATE_TOLERANCE = 0.000000001; + +const double GlobalConfiguration::CONSTRAINT_COMPARISON_TOLERANCE = 0.00001; + +const double GlobalConfiguration::SOFTMAX_LSE2_THRESHOLD = 0.6; + +const bool GlobalConfiguration::ONLY_AUX_INITIAL_BASIS = false; + +const GlobalConfiguration::ExplicitBasisBoundTighteningType + GlobalConfiguration::EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE = + GlobalConfiguration::COMPUTE_INVERTED_BASIS_MATRIX; +const bool GlobalConfiguration::EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION = false; +const double GlobalConfiguration::EXPLICIT_BASIS_BOUND_TIGHTENING_ROUNDING_CONSTANT = 1e-6; + +const unsigned GlobalConfiguration::REFACTORIZATION_THRESHOLD = 100; +const GlobalConfiguration::BasisFactorizationType GlobalConfiguration::BASIS_FACTORIZATION_TYPE = + 
GlobalConfiguration::SPARSE_FORREST_TOMLIN_FACTORIZATION; + +const unsigned GlobalConfiguration::POLARITY_CANDIDATES_THRESHOLD = 5; + +const unsigned GlobalConfiguration::DNC_DEPTH_THRESHOLD = 5; + +const double GlobalConfiguration::MINIMAL_COEFFICIENT_FOR_TIGHTENING = 0.01; +const double GlobalConfiguration::LEMMA_CERTIFICATION_TOLERANCE = 0.000001; +const bool GlobalConfiguration::WRITE_JSON_PROOF = false; + +const unsigned GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH = 3; +const unsigned GlobalConfiguration::MAX_ROUNDS_OF_BACKWARD_ANALYSIS = 10; + +#ifdef ENABLE_GUROBI +const unsigned GlobalConfiguration::GUROBI_NUMBER_OF_THREADS = 1; +const bool GlobalConfiguration::GUROBI_LOGGING = false; +#endif // ENABLE_GUROBI + +// Logging - note that it is enabled only in Debug mode +const bool GlobalConfiguration::DNC_MANAGER_LOGGING = false; +const bool GlobalConfiguration::ENGINE_LOGGING = false; +const bool GlobalConfiguration::TABLEAU_LOGGING = false; +const bool GlobalConfiguration::SMT_CORE_LOGGING = false; +const bool GlobalConfiguration::DANTZIGS_RULE_LOGGING = false; +const bool GlobalConfiguration::BASIS_FACTORIZATION_LOGGING = false; +const bool GlobalConfiguration::PREPROCESSOR_LOGGING = false; +const bool GlobalConfiguration::INPUT_QUERY_LOGGING = false; +const bool GlobalConfiguration::PROJECTED_STEEPEST_EDGE_LOGGING = false; +const bool GlobalConfiguration::GAUSSIAN_ELIMINATION_LOGGING = false; +const bool GlobalConfiguration::QUERY_LOADER_LOGGING = false; +const bool GlobalConfiguration::SYMBOLIC_BOUND_TIGHTENER_LOGGING = false; +const bool GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING = false; +const bool GlobalConfiguration::MPS_PARSER_LOGGING = false; +const bool GlobalConfiguration::ONNX_PARSER_LOGGING = false; +const bool GlobalConfiguration::SOI_LOGGING = false; +const bool GlobalConfiguration::SCORE_TRACKER_LOGGING = false; +const bool GlobalConfiguration::CEGAR_LOGGING = false; + +const bool GlobalConfiguration::USE_SMART_FIX = 
false; +const bool GlobalConfiguration::USE_LEAST_FIX = false; + +void GlobalConfiguration::print() +{ + printf( "****************************\n" ); + printf( "*** Global Configuraiton ***\n" ); + printf( "****************************\n" ); + printf( " DEFAULT_EPSILON_FOR_COMPARISONS: %.15lf\n", DEFAULT_EPSILON_FOR_COMPARISONS ); + printf( " DEFAULT_DOUBLE_TO_STRING_PRECISION: %u\n", DEFAULT_DOUBLE_TO_STRING_PRECISION ); + printf( " STATISTICS_PRINTING_FREQUENCY: %u\n", STATISTICS_PRINTING_FREQUENCY ); + printf( " BOUND_COMPARISON_ADDITIVE_TOLERANCE: %.15lf\n", + BOUND_COMPARISON_ADDITIVE_TOLERANCE ); + printf( " BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE: %.15lf\n", + BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE ); + printf( " PIVOT_CHANGE_COLUMN_TOLERANCE: %.15lf\n", PIVOT_CHANGE_COLUMN_TOLERANCE ); + printf( " RATIO_CONSTRAINT_ADDITIVE_TOLERANCE: %.15lf\n", + RATIO_CONSTRAINT_ADDITIVE_TOLERANCE ); + printf( " RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE: %.15lf\n", + RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE ); + printf( " BASIC_COSTS_ADDITIVE_TOLERANCE: %.15lf\n", BASIC_COSTS_ADDITIVE_TOLERANCE ); + printf( " BASIC_COSTS_MULTIPLICATIVE_TOLERANCE: %.15lf\n", + BASIC_COSTS_MULTIPLICATIVE_TOLERANCE ); + printf( " DEGRADATION_CHECKING_FREQUENCY: %u\n", DEGRADATION_CHECKING_FREQUENCY ); + printf( " DEGRADATION_THRESHOLD: %.15lf\n", DEGRADATION_THRESHOLD ); + printf( " ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD: %.15lf\n", ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD ); + printf( " USE_COLUMN_MERGING_EQUATIONS: %s\n", USE_COLUMN_MERGING_EQUATIONS ? 
"Yes" : "No" ); + printf( " GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD: %.15lf\n", + GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD ); + printf( " MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS: %u\n", MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS ); + printf( " BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY: %u\n", + BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY ); + printf( " COST_FUNCTION_ERROR_THRESHOLD: %.15lf\n", COST_FUNCTION_ERROR_THRESHOLD ); + printf( " USE_HARRIS_RATIO_TEST: %s\n", USE_HARRIS_RATIO_TEST ? "Yes" : "No" ); + + printf( " PREPROCESS_INPUT_QUERY: %s\n", PREPROCESS_INPUT_QUERY ? "Yes" : "No" ); + printf( " PREPROCESSOR_ELIMINATE_VARIABLES: %s\n", + PREPROCESSOR_ELIMINATE_VARIABLES ? "Yes" : "No" ); + printf( " PSE_ITERATIONS_BEFORE_RESET: %u\n", PSE_ITERATIONS_BEFORE_RESET ); + printf( " PSE_GAMMA_ERROR_THRESHOLD: %.15lf\n", PSE_GAMMA_ERROR_THRESHOLD ); + printf( " CONSTRAINT_COMPARISON_TOLERANCE: %.15lf\n", CONSTRAINT_COMPARISON_TOLERANCE ); + + String basisBoundTighteningType; + switch ( EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE ) + { + case COMPUTE_INVERTED_BASIS_MATRIX: + basisBoundTighteningType = "Compute inverted basis matrix"; + break; + + case USE_IMPLICIT_INVERTED_BASIS_MATRIX: + basisBoundTighteningType = "Use implicit inverted basis matrix"; + break; + + default: + basisBoundTighteningType = "Unknown"; + break; + } + + printf( " EXPLICIT_BASIS_BOUND_TIGHTENING_INVERT_BASIS: %s\n", + basisBoundTighteningType.ascii() ); + printf( " EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION: %s\n", + EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION ? 
"Yes" : "No" ); + printf( " REFACTORIZATION_THRESHOLD: %u\n", REFACTORIZATION_THRESHOLD ); + + String basisFactorizationType; + if ( GlobalConfiguration::BASIS_FACTORIZATION_TYPE == GlobalConfiguration::LU_FACTORIZATION ) + basisFactorizationType = "LU_FACTORIZATION"; + else if ( GlobalConfiguration::BASIS_FACTORIZATION_TYPE == + GlobalConfiguration::SPARSE_LU_FACTORIZATION ) + basisFactorizationType = "SPARSE_LU_FACTORIZATION"; + else if ( GlobalConfiguration::BASIS_FACTORIZATION_TYPE == + GlobalConfiguration::FORREST_TOMLIN_FACTORIZATION ) + basisFactorizationType = "FORREST_TOMLIN_FACTORIZATION"; + else + basisFactorizationType = "Unknown"; + + printf( " BASIS_FACTORIZATION_TYPE: %s\n", basisFactorizationType.ascii() ); + printf( "****************************\n" ); +} + +// +// Local Variables: +// compile-command: "make -C ../.. " +// tags-file-name: "../../TAGS" +// c-basic-offset: 4 +// End: +// diff --git a/src/nlr/GlobalConfiguration.h b/src/nlr/GlobalConfiguration.h new file mode 100644 index 0000000000..78ab9f662e --- /dev/null +++ b/src/nlr/GlobalConfiguration.h @@ -0,0 +1,315 @@ +/********************* */ +/*! \file GlobalConfiguration.h + ** \verbatim + ** Top contributors (to current version): + ** Guy Katz, Parth Shah, Derek Huang + ** This file is part of the Marabou project. + ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS + ** in the top-level source directory) and their institutional affiliations. + ** All rights reserved. 
See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** [[ Add lengthier description here ]] + + **/ + +#ifndef __GlobalConfiguration_h__ +#define __GlobalConfiguration_h__ + +#include "DivideStrategy.h" + +class GlobalConfiguration +{ +public: + static void print(); + + // The exponential moving average is calculated as + // ema = current * alpha + previous * (1 - alpha) + static const double EXPONENTIAL_MOVING_AVERAGE_ALPHA; + + // Whether to use SoI instead of Reluplex for local search for satisfying assignments + // to non-linear constraint. + static bool USE_DEEPSOI_LOCAL_SEARCH; + + // The quantity by which the score is bumped up for PLContraints not + // participating in the SoI. This promotes those constraints in the branching + // order. + static const double SCORE_BUMP_FOR_PL_CONSTRAINTS_NOT_IN_SOI; + + // Use the polarity metrics to decide which branch to take first in a case split + // and how to repair a ReLU constraint. + static const bool USE_POLARITY_BASED_DIRECTION_HEURISTICS; + + // The default epsilon used for comparing doubles + static const double DEFAULT_EPSILON_FOR_COMPARISONS; + + // The precision level when convering doubles to strings + static const unsigned DEFAULT_DOUBLE_TO_STRING_PRECISION; + + // How often should the main loop print statistics? + static const unsigned STATISTICS_PRINTING_FREQUENCY; + static const unsigned STATISTICS_PRINTING_FREQUENCY_GUROBI; + + // Tolerance when checking whether the value computed for a basic variable is out of bounds + static const double BOUND_COMPARISON_ADDITIVE_TOLERANCE; + static const double BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE; + + // Tolerance when checking whether a basic variable depends on a non-basic variable, by looking + // at the change column, as part of a pivot operation. 
+ static const double PIVOT_CHANGE_COLUMN_TOLERANCE; + + // Tolerance for the difference when computing the pivot entry by column and by row + static const double PIVOT_ROW_AND_COLUMN_TOLERANCE; + + // Tolerance when checking whether a non-basic variable is eligible for being selected as the + // entering variable, by its reduced cost + static const double ENTRY_ELIGIBILITY_TOLERANCE; + + // Ratio test tolerance constants + static const double RATIO_CONSTRAINT_ADDITIVE_TOLERANCE; + static const double RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE; + static const double HARRIS_RATIO_CONSTRAINT_ADDITIVE_TOLERANCE; + static const double HARRIS_RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE; + + // Cost function tolerance constants + static const double BASIC_COSTS_ADDITIVE_TOLERANCE; + static const double BASIC_COSTS_MULTIPLICATIVE_TOLERANCE; + + // Sparse ForrestTomlin diagonal element tolerance constant + static const double SPARSE_FORREST_TOMLIN_DIAGONAL_ELEMENT_TOLERANCE; + + // Toggle use of Harris' two-pass ratio test for selecting the leaving variable + static const bool USE_HARRIS_RATIO_TEST; + + // Toggle query-preprocessing on/off. + static const bool PREPROCESS_INPUT_QUERY; + + // Assuming the preprocessor is on, toggle whether or not it will attempt to perform variable + // elimination. + static const bool PREPROCESSOR_ELIMINATE_VARIABLES; + + // Toggle whether or not PL/NL constraints will be called upon + // to add auxiliary variables and equations after preprocessing. + static const bool PL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING; + static const bool NL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING; + + // If the difference between a variable's lower and upper bounds is smaller than this + // threshold, the preprocessor will treat it as fixed. + static const double PREPROCESSOR_ALMOST_FIXED_THRESHOLD; + + // Maximal rounds of tightening to perform in the preprocessor to avoid non-termination. 
+ static const unsigned PREPROCESSSING_MAX_TIGHTEING_ROUND; + + // Try to set the initial tableau assignment to an assignment that is legal with + // respect to the input network. + static const bool WARM_START; + + // The maximal number of iterations without new tree states being visited, before + // the engine performs a precision restoration. + static const unsigned MAX_ITERATIONS_WITHOUT_PROGRESS; + + // How often should the main loop check the current degradation? + static const unsigned DEGRADATION_CHECKING_FREQUENCY; + + // The threshold of degradation above which restoration is required + static const double DEGRADATION_THRESHOLD; + + // If a pivot element in a simplex iteration is smaller than this threshold, the engine will + // attempt to pick another element. + static const double ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD; + + // If true, column-merging equations are given special treatment and cause columns in the + // tableau to be merged (instead of a new row added). + static const bool USE_COLUMN_MERGING_EQUATIONS; + + // If a pivot element in a Gaussian elimination iteration is smaller than this threshold times + // the largest element in the column, the elimination engine will attempt to pick another pivot. + static const double GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD; + + // How many potential pivots should the engine inspect (at most) in every simplex iteration? + static const unsigned MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS; + + static const DivideStrategy SPLITTING_HEURISTICS; + + // The frequency to use interval splitting when largest interval splitting strategy is in use. + static const unsigned INTERVAL_SPLITTING_FREQUENCY; + + // When automatically deciding which splitting strategy to use, we use relu-splitting if + // the number of inputs is larger than this number. + static const unsigned INTERVAL_SPLITTING_THRESHOLD; + + // How often should we perform full bound tightening, on the entire contraints matrix A. 
+ static const unsigned BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY; + + // When the row bound tightener is asked to run until saturation, it can enter an infinite loop + // due to tiny increments in bounds. This number limits the number of iterations it can perform. + static const unsigned ROW_BOUND_TIGHTENER_SATURATION_ITERATIONS; + + // If the cost function error exceeds this threshold, it is recomputed + static const double COST_FUNCTION_ERROR_THRESHOLD; + + // Random seed for generating simulation values. + static const unsigned SIMULATION_RANDOM_SEED; + + // Random seed for EstimateVolume procedure (PreimageApproximation). + static const unsigned VOLUME_ESTIMATION_RANDOM_SEED; + + // Number of iterations for EstimateVolume procedure (PreimageApproximation). + static const unsigned VOLUME_ESTIMATION_ITERATIONS; + + // Random seed for PreimageApproximation optimization. + static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED; + + // Maximum iterations for PreimageApproximation optimization. + static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; + + // Step size for PreimageApproximation optimization. + static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; + + // How often should projected steepest edge reset the reference space? + static const unsigned PSE_ITERATIONS_BEFORE_RESET; + + // An error threshold which, when crossed, causes projected steepest edge to reset the reference + // space + static const double PSE_GAMMA_ERROR_THRESHOLD; + + // PSE's Gamma function's update tolerance + static const double PSE_GAMMA_UPDATE_TOLERANCE; + + // The tolerance for checking whether f = Constraint( b ), Constraint \in { ReLU, ABS, Sign} + static const double CONSTRAINT_COMPARISON_TOLERANCE; + + // Toggle between two types of LSE lower bound for softmax + static const double SOFTMAX_LSE2_THRESHOLD; + + // Should the initial basis be comprised only of auxiliary (row) variables? 
+ static const bool ONLY_AUX_INITIAL_BASIS; + + /* + Explicit (Reluplex-style) bound tightening options + */ + + enum ExplicitBasisBoundTighteningType { + // Compute the inverse basis matrix and use it + COMPUTE_INVERTED_BASIS_MATRIX = 0, + // Use the inverted basis matrix without using it, via transformations + USE_IMPLICIT_INVERTED_BASIS_MATRIX = 1, + // Disable explicit basis bound tightening + DISABLE_EXPLICIT_BASIS_TIGHTENING = 2, + }; + + // When doing bound tightening using the explicit basis matrix, should the basis matrix be + // inverted? + static const ExplicitBasisBoundTighteningType EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE; + static const double EXPLICIT_BASIS_BOUND_TIGHTENING_ROUNDING_CONSTANT; + + // When doing explicit bound tightening, should we repeat until saturation? + static const bool EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION; + + /* + Symbolic bound tightening options + */ + + // Symbolic tightening, LP rounding constants + static const double SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT; + static const double LP_TIGHTENING_ROUNDING_CONSTANT; + + static const double SIGMOID_CUTOFF_CONSTANT; + + /* + Constraint fixing heuristics + */ + + // When a PL constraint proposes a fix that affects multiple variables, should it first query + // for any relevant linear connections between the variables? + static const bool USE_SMART_FIX; + + // A heuristic for selecting which of the broken PL constraints will be repaired next. In this + // case, the one that has been repaired the least number of times so far. 
+ static const bool USE_LEAST_FIX; + + /* + Basis factorization options + */ + + // The number of accumualted eta matrices, after which the basis will be refactorized + static const unsigned REFACTORIZATION_THRESHOLD; + + // The kind of basis factorization algorithm in use + enum BasisFactorizationType { + LU_FACTORIZATION, + SPARSE_LU_FACTORIZATION, + FORREST_TOMLIN_FACTORIZATION, + SPARSE_FORREST_TOMLIN_FACTORIZATION, + }; + static const BasisFactorizationType BASIS_FACTORIZATION_TYPE; + + /* In the polarity-based branching heuristics, only this many earliest nodes + are considered to branch on. + */ + static const unsigned POLARITY_CANDIDATES_THRESHOLD; + + /* The max number of DnC splits + */ + static const unsigned DNC_DEPTH_THRESHOLD; + + /* Minimal coefficient of a variable in a Tableau row, that is used for bound tightening + */ + static const double MINIMAL_COEFFICIENT_FOR_TIGHTENING; + + /* The tolerance of errors when checking lemmas in the proof-checking process + */ + static const double LEMMA_CERTIFICATION_TOLERANCE; + + /* Denote whether proofs should be written as a JSON file + */ + static const bool WRITE_JSON_PROOF; + + /* How many layers after the current layer do we encode in backward analysis. + */ + static const unsigned BACKWARD_BOUND_PROPAGATION_DEPTH; + + /* How many rounds of backward analysis to perform? 
+ */ + static const unsigned MAX_ROUNDS_OF_BACKWARD_ANALYSIS; + +#ifdef ENABLE_GUROBI + /* + The number of threads Gurobi spawns + */ + static const unsigned GUROBI_NUMBER_OF_THREADS; + static const bool GUROBI_LOGGING; +#endif // ENABLE_GUROBI + + /* + Logging options + */ + static const bool DNC_MANAGER_LOGGING; + static const bool ENGINE_LOGGING; + static const bool TABLEAU_LOGGING; + static const bool SMT_CORE_LOGGING; + static const bool DANTZIGS_RULE_LOGGING; + static const bool BASIS_FACTORIZATION_LOGGING; + static const bool PREPROCESSOR_LOGGING; + static const bool INPUT_QUERY_LOGGING; + static const bool PROJECTED_STEEPEST_EDGE_LOGGING; + static const bool GAUSSIAN_ELIMINATION_LOGGING; + static const bool QUERY_LOADER_LOGGING; + static const bool SYMBOLIC_BOUND_TIGHTENER_LOGGING; + static const bool NETWORK_LEVEL_REASONER_LOGGING; + static const bool MPS_PARSER_LOGGING; + static const bool ONNX_PARSER_LOGGING; + static const bool SOI_LOGGING; + static const bool SCORE_TRACKER_LOGGING; + static const bool CEGAR_LOGGING; +}; + +#endif // __GlobalConfiguration_h__ + +// +// Local Variables: +// compile-command: "make -C .. " +// tags-file-name: "../TAGS" +// c-basic-offset: 4 +// End: +// diff --git a/src/nlr/GradientDescent.h b/src/nlr/GradientDescent.h new file mode 100644 index 0000000000..e86aca17d9 --- /dev/null +++ b/src/nlr/GradientDescent.h @@ -0,0 +1,315 @@ +/* + * Copyright Nick Thompson, 2024 + * Use, modification and distribution are subject to the + * Boost Software License, Version 1.0. 
(See accompanying file + * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) + */ +#ifndef GRADIENT_DESCENT +#define GRADIENT_DESCENT + +#include "FloatUtils.h" +#include "GlobalConfiguration.h" + +#include // for std::sort +#include +#include +#include +#include +#include +#include +#include // for std::false_type +#include +#include + +namespace NLR { + +template struct has_resize : std::false_type +{ +}; + +template +struct has_resize().resize( size_t{} ) )>> : std::true_type +{ +}; + +template constexpr bool has_resize_v = has_resize::value; + +template +void validate_bounds( ArgumentContainer const &lower_bounds, ArgumentContainer const &upper_bounds ) +{ + std::ostringstream oss; + + if ( lower_bounds.size() == 0 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The dimension of the problem cannot be zero."; + throw std::domain_error( oss.str() ); + } + + if ( upper_bounds.size() != lower_bounds.size() ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": There must be the same number of lower bounds as upper bounds, but given "; + oss << upper_bounds.size() << " upper bounds, and " << lower_bounds.size() + << " lower bounds."; + throw std::domain_error( oss.str() ); + } + + for ( size_t i = 0; i < lower_bounds.size(); ++i ) + { + auto lb = lower_bounds[i]; + auto ub = upper_bounds[i]; + if ( lb > ub ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The upper bound must be greater than or equal to the lower bound, but the " + "upper bound is " + << ub << " and the lower is " << lb << "."; + throw std::domain_error( oss.str() ); + } + + if ( !FloatUtils::isFinite( lb ) ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The lower bound must be finite, but got " << lb << "."; + oss << " For infinite bounds, emulate with std::numeric_limits::lower() or use a " + "standard infinite->finite " + "transform."; + throw std::domain_error( oss.str() ); + } + + 
if ( !FloatUtils::isFinite( ub ) ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The upper bound must be finite, but got " << ub << "."; + oss << " For infinite bounds, emulate with std::numeric_limits::max() or use a " + "standard infinite->finite " + "transform."; + throw std::domain_error( oss.str() ); + } + } +} + +template +std::vector random_initial_population( ArgumentContainer const &lower_bounds, + ArgumentContainer const &upper_bounds, + size_t initial_population_size, + URBG &&gen ) +{ + using Real = typename ArgumentContainer::value_type; + using DimensionlessReal = decltype( Real() / Real() ); + constexpr bool has_resize = has_resize_v; + std::vector population( initial_population_size ); + auto const dimension = lower_bounds.size(); + + for ( size_t i = 0; i < population.size(); ++i ) + { + if constexpr ( has_resize ) + population[i].resize( dimension ); + else + { + // Argument type must be known at compile-time; like std::array: + if ( population[i].size() != dimension ) + { + std::ostringstream oss; + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": For containers which do not have resize, the default size must be the " + "same as the dimension, "; + oss << "but the default container size is " << population[i].size() + << " and the dimension of the problem is " << dimension << "."; + oss << " The function argument container type is " + << typeid( ArgumentContainer ).name() << ".\n"; + throw std::runtime_error( oss.str() ); + } + } + } + + // Why don't we provide an option to initialize with (say) a Gaussian distribution? + // > If the optimum's location is fairly well known, + // > a Gaussian distribution may prove somewhat faster, although it + // > may also increase the probability that the population will converge prematurely. + // > In general, uniform distributions are preferred, since they best reflect + // > the lack of knowledge about the optimum's location. 
+ // - Differential Evolution: A Practical Approach to Global Optimization + // That said, scipy uses Latin Hypercube sampling and says self-avoiding sequences are + // preferable. So this is something that could be investigated and potentially improved. + std::uniform_real_distribution dis( DimensionlessReal( 0 ), + DimensionlessReal( 1 ) ); + for ( size_t i = 0; i < population.size(); ++i ) + { + for ( size_t j = 0; j < dimension; ++j ) + { + auto const &lb = lower_bounds[j]; + auto const &ub = upper_bounds[j]; + population[i][j] = lb + dis( gen ) * ( ub - lb ); + } + } + + return population; +} + +template +void validate_initial_guess( ArgumentContainer const &initial_guess, + ArgumentContainer const &lower_bounds, + ArgumentContainer const &upper_bounds ) +{ + std::ostringstream oss; + auto const dimension = lower_bounds.size(); + + if ( initial_guess.size() != dimension ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": The initial guess must have the same dimensions as the problem,"; + oss << ", but the problem size is " << dimension << " and the initial guess has " + << initial_guess.size() << " elements."; + throw std::domain_error( oss.str() ); + } + + for ( size_t i = 0; i < dimension; ++i ) + { + auto lb = lower_bounds[i]; + auto ub = upper_bounds[i]; + + if ( !FloatUtils::isFinite( initial_guess[i] ) ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": At index " << i << ", the initial guess is " << initial_guess[i] + << ", make sure all elements of the initial guess are finite."; + throw std::domain_error( oss.str() ); + } + + if ( initial_guess[i] < lb || initial_guess[i] > ub ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": At index " << i << " the initial guess " << initial_guess[i] + << " is not in the bounds [" << lb << ", " << ub << "]."; + throw std::domain_error( oss.str() ); + } + } +} + +// Single-Threaded Gradient Descent + +// We provide the parameters in a struct-there 
are too many of them and they are too unwieldy to +// pass individually: +template struct gradient_descent_parameters +{ + using Real = typename ArgumentContainer::value_type; + ArgumentContainer lower_bounds; + ArgumentContainer upper_bounds; + + Real step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; + // size_t max_iterations = + // GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; + size_t max_iterations = 2; + bool maximize = false; + Real weight_decay = 0; + // Real lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; + Real lr = 0.001; + ArgumentContainer const *initial_guess = nullptr; +}; + +template +void validate_gradient_descent_parameters( + gradient_descent_parameters const &cd_params ) +{ + std::ostringstream oss; + validate_bounds( cd_params.lower_bounds, cd_params.upper_bounds ); + + if ( FloatUtils::isNan( cd_params.step_size ) || cd_params.step_size <= 0 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": step_size > 0 is required, but got step_size=" << cd_params.step_size << "."; + throw std::domain_error( oss.str() ); + } + + if ( cd_params.max_iterations < 1 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": There must be at least one generation."; + throw std::invalid_argument( oss.str() ); + } + + if ( FloatUtils::isNan( cd_params.lr ) || cd_params.lr <= 0 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": lr > 0 is required, but got lr=" << cd_params.lr << "."; + throw std::domain_error( oss.str() ); + } + + if ( FloatUtils::isNan( cd_params.weight_decay ) || cd_params.weight_decay < 0 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": weight_decay >= 0 is required, but got weight_decay=" << cd_params.weight_decay + << "."; + throw std::domain_error( oss.str() ); + } + + if ( cd_params.initial_guess ) + { + validate_initial_guess( + *cd_params.initial_guess, 
cd_params.lower_bounds, cd_params.upper_bounds ); + } +} + +template +ArgumentContainer gradient_descent( + const Func cost_function, + gradient_descent_parameters const &cd_params, + URBG &gen, + std::invoke_result_t target_value, + bool *cancellation = nullptr, + std::vector>> + *queries = nullptr, + std::invoke_result_t *current_minimum_cost = nullptr ) +{ + using ResultType = std::invoke_result_t; + + validate_gradient_descent_parameters( cd_params ); + const size_t dimension = cd_params.lower_bounds.size(); + auto step_size = cd_params.step_size; + auto max_iterations = cd_params.max_iterations; + auto maximize = cd_params.maximize; + auto lr = cd_params.lr; + auto weight_decay = cd_params.weight_decay; + auto guess = + random_initial_population( cd_params.lower_bounds, cd_params.upper_bounds, 1, gen ); + + if ( cd_params.initial_guess ) + { + guess[0] = *cd_params.initial_guess; + } + + std::vector candidates( dimension ); + std::vector gradient( dimension ); + ArgumentContainer current_minimum = guess[0]; + + for ( size_t i = 0; i < max_iterations; ++i ) + { + for ( size_t j = 0; j < dimension; ++j ) + { + candidates[j] = guess[0]; + candidates[j][j] += step_size; + + size_t sign = ( maximize == false ? 
1 : -1 ); + double cost = cost_function( candidates[j] ); + gradient[j] = sign * cost / step_size + weight_decay * guess[0][j]; + + if ( !FloatUtils::isNan( target_value ) && cost <= target_value ) + { + guess[0] = candidates[j]; + break; + } + } + + for ( size_t j = 0; j < dimension; ++j ) + { + guess[0][j] -= lr * gradient[j]; + } + } + + return guess[0]; +} + +} // namespace NLR +#endif diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp index 2189b563ca..c7f3691157 100644 --- a/src/nlr/LPFormulator.cpp +++ b/src/nlr/LPFormulator.cpp @@ -326,7 +326,7 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( const Map // Clean up clearSolverQueue( freeSolvers ); - + if ( threads ) { delete[] threads; @@ -339,42 +339,42 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( const Map void LPFormulator::optimizeBoundsWithInvprop( const Map &layers ) { - } void LPFormulator::optimizeBoundsWithPreimageApproximation( const Map &layers ) { // Define EstimateVolume bind which captures layers and only receives coeffs as its input. auto EstimateVolumeBind = std::bind( EstimateVolume, layers, std::placeholders::_1 ); - + // Search over coeffs in [0, 1]^depth. Enforce single-threaded optimization. - auto opt_params = coordinate_descent_parameters>(); + auto opt_params = adam_optimizer_parameters>(); unsigned depth = layers.size(); opt_params.lower_bounds.resize( depth, 0 ); opt_params.upper_bounds.resize( depth, 1 ); double value_to_reach = 0; std::mt19937_64 rng( GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED ); - - // Find optimal coeffs and run final parameterised symbolic bound tightening + backward LP relaxation. - auto optimal_coeffs = coordinate_descent( EstimateVolumeBind, opt_params, rng, value_to_reach ); - + + // Find optimal coeffs and run final parameterised symbolic bound tightening + backward LP + // relaxation. 
+ auto optimal_coeffs = adam_optimizer( EstimateVolumeBind, opt_params, rng, value_to_reach ); + for ( unsigned i = 0; i < layers.size(); ++i ) layers[i]->computeParameterisedSymbolicBounds( optimal_coeffs[i], true ); - + optimizeBoundsWithLpRelaxation( layers, true, optimal_coeffs ); } -double LPFormulator::EstimateVolume( const Map &layers, std::vector coeffs ) +double LPFormulator::EstimateVolume( const Map &layers, + std::vector coeffs ) { // First, run parameterised symbolic bound propagation. for ( unsigned i = 0; i < layers.size(); ++i ) layers[i]->computeParameterisedSymbolicBounds( coeffs[i] ); - + std::mt19937_64 rng( GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED ); double average = 0; for ( unsigned i = 0; i < GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; ++i ) { - // Sample point from known bounds. Map point; for ( auto pair : layers ) @@ -385,16 +385,17 @@ double LPFormulator::EstimateVolume( const Map &layers, std:: { if ( layer->neuronEliminated( index ) ) continue; - + const NeuronIndex neuron( layerIndex, index ); double lb = layer->getLb( index ); double ub = layer->getUb( index ); std::uniform_real_distribution<> dis( lb, ub ); - point.insert( neuron , dis( rng ) ); + point.insert( neuron, dis( rng ) ); } } - - // Compute the average sigmoid of log-sum-exp of points' difference from bounding hyperplanes. + + // Compute the average sigmoid of log-sum-exp of points' difference from bounding + // hyperplanes. 
double sum_exp = 0; for ( auto pair : layers ) { @@ -403,17 +404,17 @@ double LPFormulator::EstimateVolume( const Map &layers, std:: { if ( layer->neuronEliminated( index ) ) continue; - + double margin = layer->calculateDifferenceFromSymbolic( point, index ); if ( FloatUtils::wellFormed( std::exp( margin ) ) ) sum_exp += std::exp( margin ); } } - + double slse = 1 / ( 1 + ( 1 / sum_exp ) ); average += slse / GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; } - + return average; } @@ -490,7 +491,9 @@ void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map coeffs ) +void LPFormulator::optimizeBoundsOfNeuronsWithLpRelaxation( ThreadArgument &args, + bool backward, + std::vector coeffs ) { unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS ); @@ -581,16 +584,16 @@ void LPFormulator::optimizeBoundsOfNeuronsWithLpRelaxation( ThreadArgument &args threads[i].interrupt(); threads[i].join(); } - + // Clean up clearSolverQueue( freeSolvers ); - + if ( threads ) { delete[] threads; threads = NULL; } - + throw InfeasibleQueryException(); } @@ -666,8 +669,9 @@ void LPFormulator::tightenSingleVariableBoundsWithLPRelaxation( ThreadArgument & { LPFormulator_LOG( Stringf( "Computing upperbound..." ).ascii() ); double ub = optimizeWithGurobi( - *gurobi, MinOrMax::MAX, variableName, cutoffValue, &infeasible ) + - GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT;; + *gurobi, MinOrMax::MAX, variableName, cutoffValue, &infeasible ) + + GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT; + ; LPFormulator_LOG( Stringf( "Upperbound computed %f", ub ).ascii() ); // Store the new bound if it is tighter @@ -697,8 +701,8 @@ void LPFormulator::tightenSingleVariableBoundsWithLPRelaxation( ThreadArgument & LPFormulator_LOG( Stringf( "Computing lowerbound..." 
).ascii() ); gurobi->reset(); double lb = optimizeWithGurobi( - *gurobi, MinOrMax::MIN, variableName, cutoffValue, &infeasible ) - - GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT; + *gurobi, MinOrMax::MIN, variableName, cutoffValue, &infeasible ) - + GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT; LPFormulator_LOG( Stringf( "Lowerbound computed: %f", lb ).ascii() ); // Store the new bound if it is tighter if ( lb > currentLb ) @@ -773,7 +777,7 @@ void LPFormulator::createLPRelaxationAfter( const Map &layers double coeff = coeffs[currentLayerIndex]; addLayerToParameterisedModel( gurobi, currentLayer, true, coeff ); } - + for ( const auto &nextLayer : currentLayer->getSuccessorLayers() ) { if ( layerToDepth.exists( nextLayer ) ) @@ -802,7 +806,7 @@ void LPFormulator::addLayerToModel( GurobiWrapper &gurobi, case Layer::WEIGHTED_SUM: addWeightedSumLayerToLpRelaxation( gurobi, layer, createVariables ); break; - + case Layer::ROUND: addRoundLayerToLpRelaxation( gurobi, layer, createVariables ); break; @@ -810,7 +814,7 @@ void LPFormulator::addLayerToModel( GurobiWrapper &gurobi, case Layer::LEAKY_RELU: addLeakyReluLayerToLpRelaxation( gurobi, layer, createVariables ); break; - + case Layer::ABSOLUTE_VALUE: addAbsoluteValueLayerToLpRelaxation( gurobi, layer, createVariables ); break; @@ -822,15 +826,15 @@ void LPFormulator::addLayerToModel( GurobiWrapper &gurobi, case Layer::MAX: addMaxLayerToLpRelaxation( gurobi, layer, createVariables ); break; - + case Layer::SIGMOID: addSigmoidLayerToLpRelaxation( gurobi, layer, createVariables ); break; - + case Layer::SOFTMAX: addSoftmaxLayerToLpRelaxation( gurobi, layer, createVariables ); - break; - + break; + case Layer::BILINEAR: addBilinearLayerToLpRelaxation( gurobi, layer, createVariables ); break; @@ -942,8 +946,8 @@ void LPFormulator::addReluLayerToLpRelaxation( GurobiWrapper &gurobi, } void LPFormulator::addRoundLayerToLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ) + const 
Layer *layer, + bool createVariables ) { for ( unsigned i = 0; i < layer->getSize(); ++i ) { @@ -959,7 +963,7 @@ void LPFormulator::addRoundLayerToLpRelaxation( GurobiWrapper &gurobi, { // If the source neuron has been eliminated, this neuron is constant double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - double targetValue = FloatUtils::round(sourceValue); + double targetValue = FloatUtils::round( sourceValue ); gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); @@ -973,11 +977,11 @@ void LPFormulator::addRoundLayerToLpRelaxation( GurobiWrapper &gurobi, if ( createVariables && !gurobi.containsVariable( sourceName ) ) gurobi.addVariable( sourceName, sourceLb, sourceUb ); - double ub = std::max( FloatUtils::round( sourceUb ), layer->getUb( i ) ); - double lb = std::min( FloatUtils::round( sourceLb ), layer->getLb( i ) ); - + double ub = std::min( FloatUtils::round( sourceUb ), layer->getUb( i ) ); + double lb = std::max( FloatUtils::round( sourceLb ), layer->getLb( i ) ); + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - + // If u = l: y = round(u) if ( FloatUtils::areEqual( sourceUb, sourceLb ) ) { @@ -985,29 +989,29 @@ void LPFormulator::addRoundLayerToLpRelaxation( GurobiWrapper &gurobi, terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); gurobi.addEqConstraint( terms, ub ); } - + else { - List terms; - // y <= x + 0.5, i.e. y - x <= 0.5 - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addLeqConstraint( terms, 0.5 ); - - // y >= x - 0.5, i.e. y - x >= -0.5 - terms.clear(); - terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, -0.5 ); + List terms; + // y <= x + 0.5, i.e. 
y - x <= 0.5 + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, 0.5 ); + + // y >= x - 0.5, i.e. y - x >= -0.5 + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, -0.5 ); } } } } void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ) + const Layer *layer, + bool createVariables ) { for ( unsigned i = 0; i < layer->getSize(); ++i ) { @@ -1036,10 +1040,6 @@ void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, String sourceName = Stringf( "x%u", sourceVariable ); if ( createVariables && !gurobi.containsVariable( sourceName ) ) gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - double ub = std::min( FloatUtils::max( -sourceLb, sourceUb ), layer->getUb( i ) ); - - gurobi.addVariable(Stringf( "x%u", targetVariable ), 0, ub ); if ( !FloatUtils::isNegative( sourceLb ) ) { @@ -1047,6 +1047,10 @@ void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, if ( sourceLb < 0 ) sourceLb = 0; + double ub = std::min( sourceUb, layer->getUb( i ) ); + double lb = std::max( sourceLb, layer->getLb( i ) ); + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); + List terms; terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); @@ -1054,6 +1058,10 @@ void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, } else if ( !FloatUtils::isPositive( sourceUb ) ) { + double ub = std::min( -sourceLb, layer->getUb( i ) ); + double lb = std::max( -sourceUb, layer->getLb( i ) ); + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub 
); + // The AbsoluteValue is inactive, y = -x, i.e. y + x = 0 List terms; terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); @@ -1062,6 +1070,9 @@ void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, } else { + double ub = std::min( std::max( -sourceLb, sourceUb ), layer->getUb( i ) ); + double lb = std::max( 0.0, layer->getLb( i ) ); + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); /* The phase of this AbsoluteValue is not yet fixed, 0 <= y <= max(-lb, ub). */ @@ -1081,8 +1092,8 @@ void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, } void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ) + const Layer *layer, + bool createVariables ) { for ( unsigned i = 0; i < layer->getSize(); ++i ) { @@ -1098,7 +1109,7 @@ void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, { // If the source neuron has been eliminated, this neuron is constant double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); - double targetValue = SigmoidConstraint::sigmoid(sourceValue); + double targetValue = SigmoidConstraint::sigmoid( sourceValue ); gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); @@ -1111,12 +1122,15 @@ void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, String sourceName = Stringf( "x%u", sourceVariable ); if ( createVariables && !gurobi.containsVariable( sourceName ) ) gurobi.addVariable( sourceName, sourceLb, sourceUb ); - - double ub = std::min( SigmoidConstraint::sigmoid( sourceUb ), layer->getUb( i ) ); - double lb = std::max( SigmoidConstraint::sigmoid( sourceLb ), layer->getLb( i ) ); - + + double sourceUbSigmoid = SigmoidConstraint::sigmoid( sourceUb ); + double sourceLbSigmoid = SigmoidConstraint::sigmoid( sourceLb ); + + double ub = std::min( sourceUbSigmoid, layer->getUb( i ) ); + double lb = std::max( sourceLbSigmoid, 
layer->getLb( i ) ); + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - + // If u = l: y = sigmoid(u) if ( FloatUtils::areEqual( sourceUb, sourceLb ) ) { @@ -1125,49 +1139,57 @@ void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); gurobi.addEqConstraint( terms, ub ); } - + else { List terms; double lambda = ( ub - lb ) / ( sourceUb - sourceLb ); double lambdaPrime = std::min( SigmoidConstraint::sigmoidDerivative( sourceLb ), - SigmoidConstraint::sigmoidDerivative( sourceUb ) ); - + SigmoidConstraint::sigmoidDerivative( sourceUb ) ); + // update lower bound if ( FloatUtils::isPositive( sourceLb ) ) - { - // y >= lambda * (x - l), i.e. y - lambda * x >= lambda * l + { + // y >= lambda * (x - l) + sigmoid(lb), i.e. y - lambda * x >= sigmoid(lb) - + // lambda * l terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -lambda, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, sourceLb * lambda ); + terms.append( + GurobiWrapper::Term( -lambda, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, sourceLbSigmoid - sourceLb * lambda ); } - + else { - // y >= lambda' * (x - l), i.e. y - lambda' * x >= lambda' * l + // y >= lambda' * (x - l) + sigmoid(lb), i.e. y - lambda' * x >= sigmoid(lb) - + // lambda' * l terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -lambdaPrime, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addGeqConstraint( terms, sourceLb * lambdaPrime ); + terms.append( + GurobiWrapper::Term( -lambdaPrime, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, sourceLbSigmoid - sourceLb * lambdaPrime ); } - + // update upper bound if ( !FloatUtils::isPositive( sourceUb ) ) { - // y <= lambda * (x - u), i.e. 
y - lambda * x <= lambda * u + // y <= lambda * (x - u) + sigmoid(ub), i.e. y - lambda * x <= sigmoid(ub) - + // lambda * u terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -lambda, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addLeqConstraint( terms, sourceUb * lambda ); + terms.append( + GurobiWrapper::Term( -lambda, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, sourceUbSigmoid - sourceUb * lambda ); } else { - // y <= lambda' * (x - u), i.e. y - lambda' * x <= lambda' * u + // y <= lambda' * (x - u) + sigmoid(ub), i.e. y - lambda' * x <= sigmoid(ub) - + // lambda' * u terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -lambdaPrime, Stringf( "x%u", sourceVariable ) ) ); - gurobi.addLeqConstraint( terms, sourceUb * lambdaPrime ); + terms.append( + GurobiWrapper::Term( -lambdaPrime, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addLeqConstraint( terms, sourceUbSigmoid - sourceUb * lambdaPrime ); } } } @@ -1338,14 +1360,14 @@ void LPFormulator::addMaxLayerToLpRelaxation( GurobiWrapper &gurobi, } void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables ) + const Layer *layer, + bool createVariables ) { for ( unsigned i = 0; i < layer->getSize(); ++i ) { if ( layer->neuronEliminated( i ) ) continue; - + Set handledInputNeurons; List sources = layer->getActivationSources( i ); @@ -1364,7 +1386,7 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, String sourceName = Stringf( "x%u", sourceVariable ); if ( createVariables && !gurobi.containsVariable( sourceName ) ) gurobi.addVariable( sourceName, sourceLb, sourceUb ); - + sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); 
sourceMids.append( ( sourceLb + sourceUb ) / 2 ); @@ -1385,12 +1407,18 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, } } - double ub = std::min( DeepPolySoftmaxElement::linearUpperBound( sourceLbs, sourceUbs, index ), layer->getUb( i ) ); - double lb = std::max( DeepPolySoftmaxElement::linearLowerBound( sourceLbs, sourceUbs, index ), layer->getLb( i ) ); - + double ub = + std::min( DeepPolySoftmaxElement::linearUpperBound( sourceLbs, sourceUbs, index ), + layer->getUb( i ) ); + double lb = + std::max( DeepPolySoftmaxElement::linearLowerBound( sourceLbs, sourceUbs, index ), + layer->getLb( i ) ); + targetLbs[index] = lb; + targetUbs[index] = ub; + unsigned targetVariable = layer->neuronToVariable( i ); gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - + double bias; SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType(); @@ -1417,15 +1445,17 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, { terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::LSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); + bias = DeepPolySoftmaxElement::LSELowerBound( + sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = - DeepPolySoftmaxElement::dLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); + double dldj = DeepPolySoftmaxElement::dLSELowerBound( + sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + terms.append( + GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); bias -= dldj * sourceMids[inputIndex]; ++inputIndex; } @@ -1435,15 +1465,17 @@ void 
LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, { terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::LSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); + bias = DeepPolySoftmaxElement::LSELowerBound2( + sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = - DeepPolySoftmaxElement::dLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); + double dldj = DeepPolySoftmaxElement::dLSELowerBound2( + sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + terms.append( + GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); bias -= dldj * sourceMids[inputIndex]; ++inputIndex; } @@ -1452,15 +1484,16 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::LSEUpperBound( sourceMids, targetLbs, targetUbs, index ); + bias = DeepPolySoftmaxElement::LSEUpperBound( + sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dudj = - DeepPolySoftmaxElement::dLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + double dudj = DeepPolySoftmaxElement::dLSEUpperbound( + sourceMids, targetLbs, targetUbs, index, inputIndex ); terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) ); bias -= dudj * sourceMids[inputIndex]; ++inputIndex; @@ -1471,32 
+1504,34 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, { terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); + bias = + DeepPolySoftmaxElement::ERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); unsigned inputIndex = 0; for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dldj = - DeepPolySoftmaxElement::dERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + double dldj = DeepPolySoftmaxElement::dERLowerBound( + sourceMids, sourceLbs, sourceUbs, index, inputIndex ); terms.append( GurobiWrapper::Term( -dldj, Stringf( "x%u", sourceVariable ) ) ); - bias -= dldj * sourceMids[inputIndex]; + bias -= dldj * sourceMids[inputIndex]; ++inputIndex; } gurobi.addGeqConstraint( terms, bias ); terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - bias = DeepPolySoftmaxElement::ERUpperBound( sourceMids, targetLbs, targetUbs, index ); + bias = + DeepPolySoftmaxElement::ERUpperBound( sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &source : sources ) { const Layer *sourceLayer = _layerOwner->getLayer( source._layer ); unsigned sourceNeuron = source._neuron; unsigned sourceVariable = sourceLayer->neuronToVariable( sourceNeuron ); - double dudj = - DeepPolySoftmaxElement::dERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + double dudj = DeepPolySoftmaxElement::dERUpperBound( + sourceMids, targetLbs, targetUbs, index, inputIndex ); terms.append( GurobiWrapper::Term( -dudj, Stringf( "x%u", sourceVariable ) ) ); bias -= dudj * sourceMids[inputIndex]; ++inputIndex; @@ -1507,8 +1542,9 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper 
&gurobi, } } -void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, - bool createVariables ) +void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables ) { for ( unsigned i = 0; i < layer->getSize(); ++i ) { @@ -1519,12 +1555,11 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, const List sources = layer->getActivationSources( i ); const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - + Vector sourceLbs; Vector sourceUbs; Vector sourceValues; Vector sourceNeurons; - unsigned counter = 0; bool allConstant = true; for ( const auto &sourceIndex : sources ) { @@ -1532,15 +1567,14 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, const double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) ); - + sourceNeurons.append( sourceNeuron ); - sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - + sourceLbs.append( sourceLb ); + sourceUbs.append( sourceUb ); + if ( createVariables && !gurobi.containsVariable( sourceName ) ) gurobi.addVariable( sourceName, sourceLb, sourceUb ); - ++counter; - + if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) { allConstant = false; @@ -1550,9 +1584,8 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, const double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); sourceValues.append( sourceValue ); } - ++counter; } - + if ( allConstant ) { // If the both source neurons have been eliminated, this neuron is constant @@ -1560,7 +1593,7 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, const gurobi.addVariable( Stringf( "x%u", targetVariable ), 
targetValue, targetValue ); continue; } - + double lb = FloatUtils::infinity(); double ub = FloatUtils::negativeInfinity(); List values = { sourceLbs[0] * sourceLbs[1], @@ -1574,22 +1607,30 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, const if ( v > ub ) ub = v; } - + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); - + // Lower bound: out >= l_y * x + l_x * y - l_x * l_y List terms; terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -sourceLbs[1], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); - terms.append( GurobiWrapper::Term( -sourceLbs[0], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + terms.append( GurobiWrapper::Term( + -sourceLbs[1], + Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + terms.append( GurobiWrapper::Term( + -sourceLbs[0], + Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addGeqConstraint( terms, -sourceLbs[0] * sourceLbs[1] ); - + // Upper bound: out <= u_y * x + l_x * y - l_x * u_y terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -sourceUbs[1], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); - terms.append( GurobiWrapper::Term( -sourceLbs[0], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + terms.append( GurobiWrapper::Term( + -sourceUbs[1], + Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + terms.append( GurobiWrapper::Term( + -sourceLbs[0], + Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addLeqConstraint( terms, -sourceLbs[0] * sourceUbs[1] ); } } @@ -1755,7 +1796,6 @@ void LPFormulator::addLayerToParameterisedModel( GurobiWrapper &gurobi, { switch ( layer->getLayerType() ) { - case Layer::RELU: 
addReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); break; @@ -1763,11 +1803,11 @@ void LPFormulator::addLayerToParameterisedModel( GurobiWrapper &gurobi, case Layer::LEAKY_RELU: addLeakyReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); break; - + case Layer::SIGN: addSignLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); break; - + default: addLayerToModel( gurobi, layer, createVariables ); break; @@ -1844,7 +1884,8 @@ void LPFormulator::addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurob terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); gurobi.addGeqConstraint( terms, 0 ); - // y >= coeff * x, i.e. y - coeff * x >= 0 (varies continuously between y >= 0 and y >= alpha * x). + // y >= coeff * x, i.e. y - coeff * x >= 0 (varies continuously between y >= 0 and + // y >= alpha * x). terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -coeff, Stringf( "x%u", sourceVariable ) ) ); @@ -1928,23 +1969,26 @@ void LPFormulator::addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurob /* 2 - y <= ----- * (1 - coeff) x + 1 (varies continuously between y <= 1 and y <= -2 / l * x + 1). + y <= ----- * (1 - coeff) x + 1 (varies continuously between y <= 1 and y <= -2 / l * + x + 1). - l */ List terms; terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( 2.0 / sourceLb * (1 - coeff), Stringf( "x%u", sourceVariable ) ) ); + terms.append( GurobiWrapper::Term( 2.0 / sourceLb * ( 1 - coeff ), + Stringf( "x%u", sourceVariable ) ) ); gurobi.addLeqConstraint( terms, 1 ); /* 2 - y >= ----- * (1 - coeff) x - 1 (varies continuously between y >= -1 and y >= 2 / u * x - 1). + y >= ----- * (1 - coeff) x - 1 (varies continuously between y >= -1 and y >= 2 / u * + x - 1). 
u */ terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( - GurobiWrapper::Term( -2.0 / sourceUb * (1 - coeff), Stringf( "x%u", sourceVariable ) ) ); + terms.append( GurobiWrapper::Term( -2.0 / sourceUb * ( 1 - coeff ), + Stringf( "x%u", sourceVariable ) ) ); gurobi.addGeqConstraint( terms, -1 ); } } @@ -2002,7 +2046,7 @@ void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper & // The LeakyReLU is inactive, y = alpha * x List terms; terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( slope, Stringf( "x%u", sourceVariable ) ) ); + terms.append( GurobiWrapper::Term( -slope, Stringf( "x%u", sourceVariable ) ) ); gurobi.addEqConstraint( terms, 0 ); } else @@ -2014,15 +2058,17 @@ void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper & /* The phase of this LeakyReLU is not yet fixed. For y = LeakyReLU(x), we add the following triangular relaxation: - 1. y >= ((1 - alpha) * coeff + alpha) * x (varies continuously between y >= alpha * x and y >= x). + 1. y >= ((1 - alpha) * coeff + alpha) * x (varies continuously between y >= + alpha * x and y >= x). 2. y >= x 3. y is below the line the crosses (x.lb,0) and (x.ub,x.ub) */ - // y >= ((alpha+1) * coeff - alpha) * x + // y >= ((1 - alpha) * coeff + alpha) * x List terms; terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -slope - ( 1 - slope ) * coeff, Stringf( "x%u", sourceVariable ) ) ); + terms.append( GurobiWrapper::Term( -slope - ( 1 - slope ) * coeff, + Stringf( "x%u", sourceVariable ) ) ); gurobi.addGeqConstraint( terms, 0 ); // y >= x, i.e. 
y - x >= 0 diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index 41f5ac1a3f..f1a5849004 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -16,7 +16,7 @@ #ifndef __LPFormulator_h__ #define __LPFormulator_h__ -#include "CoordinateDescent.h" +#include "AdamOptimizer.h" #include "GurobiWrapper.h" #include "LayerOwner.h" #include "Map.h" @@ -110,47 +110,60 @@ class LPFormulator : public ParallelSolver void addMaxLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); - + void addRoundLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); - - void - addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); - - void - addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); - - void - addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); - - void - addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); - + + void addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables ); + + void addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables ); + + void addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables ); + + void addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables ); + void addWeightedSumLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); - void optimizeBoundsOfNeuronsWithLpRelaxation( ThreadArgument &args, bool backward, std::vector coeffs = {} ); - - - // Create LP relaxations depending on an external parameter. 
+ void optimizeBoundsOfNeuronsWithLpRelaxation( ThreadArgument &args, + bool backward, + std::vector coeffs = {} ); + + + // Create LP relaxations depending on an external parameter. void addLayerToParameterisedModel( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - double coeff ); - - void - addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); - - void - addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); - - void - addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, double coeff ); - - + const Layer *layer, + bool createVariables, + double coeff ); + + void addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ); + + void addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ); + + void addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ); + + // Estimate Volume of parameterised LP relaxations. 
- static double EstimateVolume( const Map &layers, std::vector coeffs ); + static double EstimateVolume( const Map &layers, + std::vector coeffs ); /* diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index 644c056f8d..b53777488e 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -417,7 +417,7 @@ void Layer::computeSimulations() for ( unsigned j = 0; j < simulationSize; ++j ) { _simulations[i][j] = FloatUtils::negativeInfinity(); - + Vector inputs; Vector outputs; unsigned outputIndex = 0; @@ -433,7 +433,7 @@ void Layer::computeSimulations() inputs.append( value ); ++index; } - + SoftmaxConstraint::softmax( inputs, outputs ); _simulations[i][j] = outputs[outputIndex]; } @@ -724,27 +724,27 @@ void Layer::computeIntervalArithmeticBounds() case SIGN: computeIntervalArithmeticBoundsForSign(); break; - + case ROUND: computeIntervalArithmeticBoundsForRound(); break; - + case LEAKY_RELU: computeIntervalArithmeticBoundsForLeakyRelu(); break; - + case SIGMOID: computeIntervalArithmeticBoundsForSigmoid(); break; - + case MAX: computeIntervalArithmeticBoundsForMax(); break; - + case SOFTMAX: computeIntervalArithmeticBoundsForSoftmax(); break; - + case BILINEAR: computeIntervalArithmeticBoundsForBilinear(); break; @@ -925,7 +925,7 @@ void Layer::computeIntervalArithmeticBoundsForSign() double lb = sourceLayer->getLb( sourceIndex._neuron ); double ub = sourceLayer->getUb( sourceIndex._neuron ); - + double new_lb; double new_ub; @@ -944,7 +944,7 @@ void Layer::computeIntervalArithmeticBoundsForSign() new_lb = -1; new_ub = 1; } - + /* We now have the tightest bounds we can for the relu variable. 
If they are tigheter than what was previously @@ -1041,10 +1041,10 @@ void Layer::computeIntervalArithmeticBoundsForSigmoid() double lb = sourceLayer->getLb( sourceIndex._neuron ); double ub = sourceLayer->getUb( sourceIndex._neuron ); - + double lbSigmoid = SigmoidConstraint::sigmoid( lb ); double ubSigmoid = SigmoidConstraint::sigmoid( ub ); - + if ( _lb[i] < lbSigmoid ) { @@ -1073,10 +1073,10 @@ void Layer::computeIntervalArithmeticBoundsForRound() double lb = sourceLayer->getLb( sourceIndex._neuron ); double ub = sourceLayer->getUb( sourceIndex._neuron ); - + double lbRound = FloatUtils::round( lb ); double ubRound = FloatUtils::round( ub ); - + if ( _lb[i] < lbRound ) { @@ -1116,7 +1116,7 @@ void Layer::computeIntervalArithmeticBoundsForMax() unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); - + sourceLbs[sourceIndex] = sourceLb; sourceUbs[sourceIndex] = sourceUb; @@ -1155,7 +1155,7 @@ void Layer::computeIntervalArithmeticBoundsForMax() _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } - + if ( _ub[i] > sourceUbs[indexOfMaxLowerBound] ) { _ub[i] = sourceUbs[indexOfMaxLowerBound]; @@ -1173,7 +1173,7 @@ void Layer::computeIntervalArithmeticBoundsForMax() _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } - + if ( _ub[i] > maxUpperBound ) { _ub[i] = maxUpperBound; @@ -1195,9 +1195,9 @@ void Layer::computeIntervalArithmeticBoundsForSoftmax() ASSERT( _neuronToActivationSources.exists( i ) ); List sources = getActivationSources( i ); const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - + ASSERT( sourceLayer->getSize() == _size ); - + Vector sourceLbs; Vector sourceUbs; for ( const auto &sourceIndex : sources ) @@ -1205,7 +1205,7 @@ void Layer::computeIntervalArithmeticBoundsForSoftmax() unsigned sourceNeuron = sourceIndex._neuron; double sourceLb 
= sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); - + sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); } @@ -1250,9 +1250,9 @@ void Layer::computeIntervalArithmeticBoundsForBilinear() ASSERT( _neuronToActivationSources.exists( i ) ); List sources = getActivationSources( i ); ASSERT( sources.size() == 2 ); - + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - + Vector sourceLbs; Vector sourceUbs; Vector sourceValues; @@ -1262,10 +1262,10 @@ void Layer::computeIntervalArithmeticBoundsForBilinear() unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); - - sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - + + sourceLbs.append( sourceLb ); + sourceUbs.append( sourceUb ); + if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) { allConstant = false; @@ -1276,19 +1276,19 @@ void Layer::computeIntervalArithmeticBoundsForBilinear() sourceValues.append( sourceValue ); } } - + if ( allConstant ) { // If the both source neurons have been eliminated, this neuron is constant double value = sourceValues[0] * sourceValues[1]; - + if ( _lb[i] < value ) { _lb[i] = value; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } - + if ( _ub[i] > value ) { _ub[i] = value; @@ -1297,7 +1297,7 @@ void Layer::computeIntervalArithmeticBoundsForBilinear() } continue; } - + double lb = FloatUtils::infinity(); double ub = FloatUtils::negativeInfinity(); List values = { sourceLbs[0] * sourceLbs[1], @@ -1311,8 +1311,8 @@ void Layer::computeIntervalArithmeticBoundsForBilinear() if ( v > ub ) ub = v; } - - + + if ( _lb[i] < lb ) { _lb[i] = lb; @@ 
-1352,31 +1352,31 @@ void Layer::computeSymbolicBounds() case ABSOLUTE_VALUE: computeSymbolicBoundsForAbsoluteValue(); break; - + case LEAKY_RELU: computeSymbolicBoundsForLeakyRelu(); break; - + case ROUND: computeSymbolicBoundsForRound(); break; - + case SIGMOID: computeSymbolicBoundsForSigmoid(); break; - + case MAX: computeSymbolicBoundsForMax(); break; - + case SOFTMAX: computeSymbolicBoundsForSoftmax(); break; - + case BILINEAR: computeSymbolicBoundsForBilinear(); break; - + default: computeSymbolicBoundsDefault(); break; @@ -1554,19 +1554,19 @@ void Layer::computeSymbolicBoundsForRelu() { for ( unsigned j = 0; j < _inputLayerSize; ++j ) _symbolicLb[j * _size + i] = 0; - + _symbolicLowerBias[i] = 0; } else - { + { for ( unsigned j = 0; j < _inputLayerSize; ++j ) _symbolicLb[j * _size + i] = _symbolicLb[j * _size + i] * _symbolicUbOfLb[i] / ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] ); - + _symbolicLowerBias[i] = _symbolicLowerBias[i] * _symbolicUbOfLb[i] / ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] ); } - + _symbolicLbOfLb[i] = 0; } else @@ -1698,7 +1698,7 @@ void Layer::computeSymbolicBoundsForSign() { PhaseStatus upperSignPhase = PHASE_NOT_FIXED; PhaseStatus lowerSignPhase = PHASE_NOT_FIXED; - + // If we got here, we know that lbLb < 0 and ubUb // > 0 @@ -1712,7 +1712,7 @@ void Layer::computeSymbolicBoundsForSign() _symbolicUb[j * _size + i] = 0; _symbolicUpperBias[i] = 1; - + upperSignPhase = SIGN_PHASE_POSITIVE; } else @@ -1757,8 +1757,8 @@ void Layer::computeSymbolicBoundsForSign() _symbolicLowerBias[i] *= factor; _symbolicLowerBias[i] -= 1; } - - if (upperSignPhase == PHASE_NOT_FIXED) + + if ( upperSignPhase == PHASE_NOT_FIXED ) { _symbolicUbOfUb[i] = 1; _symbolicLbOfUb[i] = -1; @@ -1768,8 +1768,8 @@ void Layer::computeSymbolicBoundsForSign() _symbolicUbOfUb[i] = 1; _symbolicLbOfUb[i] = 1; } - - if (lowerSignPhase == PHASE_NOT_FIXED) + + if ( lowerSignPhase == PHASE_NOT_FIXED ) { _symbolicUbOfLb[i] = 1; _symbolicLbOfLb[i] = -1; @@ -1961,7 +1961,7 @@ void 
Layer::computeSymbolicBoundsForAbsoluteValue() void Layer::computeSymbolicBoundsForLeakyRelu() { ASSERT( _alpha > 0 && _alpha < 1 ); - + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); @@ -2052,11 +2052,11 @@ void Layer::computeSymbolicBoundsForLeakyRelu() { _symbolicUb[j * _size + i] *= weight; } - + // Do the same for the bias, and then adjust _symbolicUpperBias[i] *= weight; _symbolicUpperBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; - + // For the lower bound, in general, x_f >= lambda * x_b, where // 0 <= lambda <= 1, would be a sound lower bound. We @@ -2068,7 +2068,7 @@ void Layer::computeSymbolicBoundsForLeakyRelu() // lambda = 1 // Symbolic lower bound: x_f >= x_b // Concrete lower bound: x_f >= sourceLb - + // Lower bounds are passed as is } else @@ -2076,12 +2076,12 @@ void Layer::computeSymbolicBoundsForLeakyRelu() // lambda = 1 // Symbolic lower bound: x_f >= _alpha x_b // Concrete lower bound: x_f >= 0 - + for ( unsigned j = 0; j < _inputLayerSize; ++j ) { _symbolicLb[j * _size + i] *= _alpha; } - + _symbolicLowerBias[i] *= _alpha; } } @@ -2091,7 +2091,7 @@ void Layer::computeSymbolicBoundsForLeakyRelu() { _symbolicLb[j * _size + i] *= weight; } - + // Do the same for the bias, and then adjust _symbolicLowerBias[i] *= weight; _symbolicLowerBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; @@ -2106,11 +2106,11 @@ void Layer::computeSymbolicBoundsForLeakyRelu() { _symbolicUb[j * _size + i] *= _alpha; } - + _symbolicUpperBias[i] *= _alpha; } } - + /* We now have the symbolic representation for the current layer. 
Next, we compute new lower and upper bounds for @@ -2126,9 +2126,9 @@ void Layer::computeSymbolicBoundsForLeakyRelu() { double inputLb = _layerOwner->getLayer( 0 )->getLb( j ); double inputUb = _layerOwner->getLayer( 0 )->getUb( j ); - + double entry = _symbolicLb[j * _size + i]; - + if ( entry >= 0 ) { _symbolicLbOfLb[i] += ( entry * inputLb ); @@ -2139,9 +2139,9 @@ void Layer::computeSymbolicBoundsForLeakyRelu() _symbolicLbOfLb[i] += ( entry * inputUb ); _symbolicUbOfLb[i] += ( entry * inputLb ); } - + entry = _symbolicUb[j * _size + i]; - + if ( entry >= 0 ) { _symbolicLbOfUb[i] += ( entry * inputLb ); @@ -2179,7 +2179,7 @@ void Layer::computeSymbolicBoundsForLeakyRelu() _symbolicUpperBias[i] *= _alpha; } } - + if ( _symbolicUbOfUb[i] > sourceUb ) _symbolicUbOfUb[i] = sourceUb; if ( _symbolicLbOfLb[i] < _alpha * sourceLb ) @@ -2233,7 +2233,7 @@ void Layer::computeSymbolicBoundsForSigmoid() ASSERT( _neuronToActivationSources.exists( i ) ); NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); - + /* A Sigmoid initially "inherits" the symbolic bounds computed for its input variable @@ -2259,11 +2259,11 @@ void Layer::computeSymbolicBoundsForSigmoid() _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); - + // Bounds of lb, ub are the Sigmoids of source lb, ub double sourceUbSigmoid = SigmoidConstraint::sigmoid( sourceUb ); double sourceLbSigmoid = SigmoidConstraint::sigmoid( sourceLb ); - + // Case when the Sigmoid constraint is fixed if ( FloatUtils::areEqual( FloatUtils::round( sourceUb ), FloatUtils::round( sourceLb ) ) ) { @@ -2272,16 +2272,16 @@ void Layer::computeSymbolicBoundsForSigmoid() _symbolicLb[j * _size + i] = 0; _symbolicUb[j * _size + i] = 0; } - + _symbolicLbOfUb[i] = sourceUbSigmoid; 
_symbolicUbOfUb[i] = sourceUbSigmoid; _symbolicLbOfLb[i] = sourceLbSigmoid; _symbolicUbOfLb[i] = sourceLbSigmoid; - + _symbolicUpperBias[i] = sourceUbSigmoid; _symbolicLowerBias[i] = sourceLbSigmoid; } - + // Sigmoid not fixed else { @@ -2296,7 +2296,7 @@ void Layer::computeSymbolicBoundsForSigmoid() { _symbolicLb[j * _size + i] *= lambda; } - + // Do the same for the bias, and then adjust _symbolicLowerBias[i] *= lambda; _symbolicLowerBias[i] += sourceLbSigmoid - lambda * sourceLb; @@ -2307,7 +2307,7 @@ void Layer::computeSymbolicBoundsForSigmoid() { _symbolicLb[j * _size + i] *= lambdaPrime; } - + // Do the same for the bias, and then adjust _symbolicLowerBias[i] *= lambdaPrime; _symbolicLowerBias[i] += sourceLbSigmoid - lambdaPrime * sourceLb; @@ -2320,7 +2320,7 @@ void Layer::computeSymbolicBoundsForSigmoid() { _symbolicUb[j * _size + i] *= lambda; } - + // Do the same for the bias, and then adjust _symbolicUpperBias[i] *= lambda; _symbolicUpperBias[i] += sourceUbSigmoid - lambda * sourceUb; @@ -2331,13 +2331,13 @@ void Layer::computeSymbolicBoundsForSigmoid() { _symbolicUb[j * _size + i] *= lambdaPrime; } - + // Do the same for the bias, and then adjust _symbolicUpperBias[i] *= lambdaPrime; _symbolicUpperBias[i] += sourceUbSigmoid - lambdaPrime * sourceUb; } - - + + /* We now have the symbolic representation for the current layer. 
Next, we compute new lower and upper bounds for @@ -2353,9 +2353,9 @@ void Layer::computeSymbolicBoundsForSigmoid() { double inputLb = _layerOwner->getLayer( 0 )->getLb( j ); double inputUb = _layerOwner->getLayer( 0 )->getUb( j ); - + double entry = _symbolicLb[j * _size + i]; - + if ( entry >= 0 ) { _symbolicLbOfLb[i] += ( entry * inputLb ); @@ -2366,9 +2366,9 @@ void Layer::computeSymbolicBoundsForSigmoid() _symbolicLbOfLb[i] += ( entry * inputUb ); _symbolicUbOfLb[i] += ( entry * inputLb ); } - + entry = _symbolicUb[j * _size + i]; - + if ( entry >= 0 ) { _symbolicLbOfUb[i] += ( entry * inputLb ); @@ -2381,12 +2381,12 @@ void Layer::computeSymbolicBoundsForSigmoid() } } } - + if ( _symbolicLbOfLb[i] < -1 ) _symbolicLbOfLb[i] = -1; if ( _symbolicUbOfUb[i] > 1 ) _symbolicUbOfUb[i] = 1; - + /* We now have the tightest bounds we can for the relu variable. If they are tigheter than what was previously @@ -2435,7 +2435,7 @@ void Layer::computeSymbolicBoundsForRound() ASSERT( _neuronToActivationSources.exists( i ) ); NeuronIndex sourceIndex = *_neuronToActivationSources[i].begin(); const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); - + /* A Round initially "inherits" the symbolic bounds computed for its input variable @@ -2461,42 +2461,42 @@ void Layer::computeSymbolicBoundsForRound() _symbolicUbOfLb[i] = sourceLayer->getSymbolicUbOfLb( sourceIndex._neuron ); _symbolicLbOfUb[i] = sourceLayer->getSymbolicLbOfUb( sourceIndex._neuron ); _symbolicUbOfUb[i] = sourceLayer->getSymbolicUbOfUb( sourceIndex._neuron ); - - + + // Bounds of lb, ub are the rounded values of source lb, ub double sourceUbRound = FloatUtils::round( sourceUb ); double sourceLbRound = FloatUtils::round( sourceLb ); - + _symbolicLbOfUb[i] = sourceUbRound; _symbolicUbOfUb[i] = sourceUbRound; _symbolicLbOfLb[i] = sourceLbRound; _symbolicUbOfLb[i] = sourceLbRound; - - + + // Case when the Round constraint is fixed if ( FloatUtils::areEqual( FloatUtils::round( sourceUb ), 
FloatUtils::round( sourceLb ) ) ) { _symbolicUb[i] = 0; _symbolicUpperBias[i] = sourceUbRound; - + _symbolicLb[i] = 0; _symbolicLowerBias[i] = sourceLbRound; } - + // Round not fixed else { // Symbolic upper bound: x_f <= x_b + 0.5 // Concrete upper bound: x_f <= round(ub_b) - + _symbolicUpperBias[i] += 0.5; // Symbolic lower bound: x_f >= x_b - 0.5 // Concrete lower bound: x_f >= round(lb_b) - + _symbolicLowerBias[i] -= 0.5; } - + /* We now have the tightest bounds we can for the relu variable. If they are tigheter than what was previously @@ -2545,7 +2545,7 @@ void Layer::computeSymbolicBoundsForMax() ASSERT( _neuronToActivationSources.exists( i ) ); List sources = getActivationSources( i ); const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - + unsigned sourceLayerSize = sourceLayer->getSize(); const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); @@ -2561,7 +2561,7 @@ void Layer::computeSymbolicBoundsForMax() unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); - + sourceLbs[sourceIndex] = sourceLb; sourceUbs[sourceIndex] = sourceUb; @@ -2596,15 +2596,17 @@ void Layer::computeSymbolicBoundsForMax() // Concrete bound: lb_b <= x_f <= ub_b for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - _symbolicLb[j * _size + i] = - sourceSymbolicLb[j * sourceLayerSize + indexOfMaxLowerBound._neuron]; - _symbolicUb[j * _size + i] = - sourceSymbolicUb[j * sourceLayerSize + indexOfMaxLowerBound._neuron]; - } - _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[indexOfMaxLowerBound._neuron]; - _symbolicUpperBias[i] = sourceLayer->getSymbolicUpperBias()[indexOfMaxLowerBound._neuron]; - - + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + indexOfMaxLowerBound._neuron]; + _symbolicUb[j * _size + i] = + sourceSymbolicUb[j * sourceLayerSize + 
indexOfMaxLowerBound._neuron]; + } + _symbolicLowerBias[i] = + sourceLayer->getSymbolicLowerBias()[indexOfMaxLowerBound._neuron]; + _symbolicUpperBias[i] = + sourceLayer->getSymbolicUpperBias()[indexOfMaxLowerBound._neuron]; + + _symbolicLbOfLb[i] = maxLowerBound; _symbolicUbOfLb[i] = maxLowerBound; _symbolicLbOfUb[i] = sourceUbs[indexOfMaxLowerBound]; @@ -2617,19 +2619,20 @@ void Layer::computeSymbolicBoundsForMax() // Concrete bounds: lb_b <= x_f <= maxUpperBound for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - _symbolicLb[j * _size + i] = - sourceSymbolicLb[j * sourceLayerSize + indexOfMaxLowerBound._neuron]; - _symbolicUb[j * _size + i] = 0; + _symbolicLb[j * _size + i] = + sourceSymbolicLb[j * sourceLayerSize + indexOfMaxLowerBound._neuron]; + _symbolicUb[j * _size + i] = 0; } - _symbolicLowerBias[i] = sourceLayer->getSymbolicLowerBias()[indexOfMaxLowerBound._neuron]; + _symbolicLowerBias[i] = + sourceLayer->getSymbolicLowerBias()[indexOfMaxLowerBound._neuron]; _symbolicUpperBias[i] = maxUpperBound; - + _symbolicLbOfLb[i] = maxLowerBound; _symbolicUbOfLb[i] = maxLowerBound; _symbolicLbOfUb[i] = maxUpperBound; _symbolicUbOfUb[i] = maxUpperBound; } - + /* We now have the tightest bounds we can for the relu variable. 
If they are tigheter than what was previously @@ -2655,7 +2658,7 @@ void Layer::computeSymbolicBoundsForSoftmax() { std::fill_n( _symbolicLowerBias, _size, 0 ); std::fill_n( _symbolicUpperBias, _size, 0 ); - + double *symbolicLb = new double[_size * _size]; double *symbolicUb = new double[_size * _size]; std::fill_n( symbolicLb, _size * _size, 0 ); @@ -2663,7 +2666,7 @@ void Layer::computeSymbolicBoundsForSoftmax() double *_work = new double[_size * _size]; std::fill_n( _work, _size * _size, 0 ); - + Set handledInputNeurons; unsigned sourceLayerSize = _size; SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType(); @@ -2690,10 +2693,10 @@ void Layer::computeSymbolicBoundsForSoftmax() ASSERT( _neuronToActivationSources.exists( i ) ); List sources = getActivationSources( i ); const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - + sourceLayerSize = sourceLayer->getSize(); ASSERT( sourceLayerSize == _size ); - + Vector sourceLbs; Vector sourceUbs; Vector sourceMids; @@ -2705,13 +2708,13 @@ void Layer::computeSymbolicBoundsForSoftmax() unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); - + sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); sourceMids.append( ( sourceLb + sourceUb ) / 2 ); targetLbs.append( _lb[i] ); targetUbs.append( _ub[i] ); - + ++len; } @@ -2773,8 +2776,8 @@ void Layer::computeSymbolicBoundsForSoftmax() softmaxLSELowerBound( sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &sourceIndex : sources ) { - double dldj = - softmaxdLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + double dldj = softmaxdLSELowerBound( + sourceMids, sourceLbs, sourceUbs, index, inputIndex ); symbolicLb[len * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * 
sourceMids[inputIndex]; ++inputIndex; @@ -2786,20 +2789,21 @@ void Layer::computeSymbolicBoundsForSoftmax() softmaxLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index ); for ( const auto &sourceIndex : sources ) { - double dldj = - softmaxdLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); + double dldj = softmaxdLSELowerBound2( + sourceMids, sourceLbs, sourceUbs, index, inputIndex ); symbolicLb[len * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; } } - _symbolicUpperBias[i] = softmaxLSEUpperBound( sourceMids, targetLbs, targetUbs, index ); + _symbolicUpperBias[i] = + softmaxLSEUpperBound( sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &sourceIndex : sources ) { - double dudj = - softmaxdLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex ); + double dudj = softmaxdLSEUpperbound( + sourceMids, targetLbs, targetUbs, index, inputIndex ); symbolicUb[len * sourceIndex._neuron + i] = dudj; _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; ++inputIndex; @@ -2807,7 +2811,8 @@ void Layer::computeSymbolicBoundsForSoftmax() } else if ( boundType == SoftmaxBoundType::EXPONENTIAL_RECIPROCAL_DECOMPOSITION ) { - _symbolicLowerBias[i] = softmaxERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); + _symbolicLowerBias[i] = + softmaxERLowerBound( sourceMids, sourceLbs, sourceUbs, index ); unsigned inputIndex = 0; for ( const auto &sourceIndex : sources ) { @@ -2818,7 +2823,8 @@ void Layer::computeSymbolicBoundsForSoftmax() ++inputIndex; } - _symbolicUpperBias[i] = softmaxERUpperBound( sourceMids, targetLbs, targetUbs, index ); + _symbolicUpperBias[i] = + softmaxERUpperBound( sourceMids, targetLbs, targetUbs, index ); inputIndex = 0; for ( const auto &sourceIndex : sources ) { @@ -2831,7 +2837,7 @@ void Layer::computeSymbolicBoundsForSoftmax() } } } - + for ( const auto &sourceLayerEntry : _sourceLayers ) { const Layer *sourceLayer = 
_layerOwner->getLayer( sourceLayerEntry.first ); @@ -2858,9 +2864,13 @@ void Layer::computeSymbolicBoundsForSoftmax() sourceLayerSize, _size ); if ( sourceLayer->getSymbolicLowerBias() ) - matrixMultiplication( - sourceLayer->getSymbolicLowerBias(), _work, _symbolicLowerBias, 1, sourceLayerSize, _size ); - + matrixMultiplication( sourceLayer->getSymbolicLowerBias(), + _work, + _symbolicLowerBias, + 1, + sourceLayerSize, + _size ); + for ( unsigned i = 0; i < sourceLayerSize * _size; ++i ) { if ( symbolicLb[i] < 0 ) @@ -2876,9 +2886,13 @@ void Layer::computeSymbolicBoundsForSoftmax() sourceLayerSize, _size ); if ( sourceLayer->getSymbolicLowerBias() ) - matrixMultiplication( - sourceLayer->getSymbolicUpperBias(), _work, _symbolicLowerBias, 1, sourceLayerSize, _size ); - + matrixMultiplication( sourceLayer->getSymbolicUpperBias(), + _work, + _symbolicLowerBias, + 1, + sourceLayerSize, + _size ); + for ( unsigned i = 0; i < sourceLayerSize * _size; ++i ) { if ( symbolicUb[i] > 0 ) @@ -2894,9 +2908,13 @@ void Layer::computeSymbolicBoundsForSoftmax() sourceLayerSize, _size ); if ( sourceLayer->getSymbolicUpperBias() ) - matrixMultiplication( - sourceLayer->getSymbolicUpperBias(), _work, _symbolicUpperBias, 1, sourceLayerSize, _size ); - + matrixMultiplication( sourceLayer->getSymbolicUpperBias(), + _work, + _symbolicUpperBias, + 1, + sourceLayerSize, + _size ); + for ( unsigned i = 0; i < sourceLayerSize * _size; ++i ) { if ( symbolicUb[i] < 0 ) @@ -2912,10 +2930,14 @@ void Layer::computeSymbolicBoundsForSoftmax() sourceLayerSize, _size ); if ( sourceLayer->getSymbolicUpperBias() ) - matrixMultiplication( - sourceLayer->getSymbolicLowerBias(), _work, _symbolicUpperBias, 1, sourceLayerSize, _size ); + matrixMultiplication( sourceLayer->getSymbolicLowerBias(), + _work, + _symbolicUpperBias, + 1, + sourceLayerSize, + _size ); } - + /* We now have the symbolic representation for the current layer. 
Next, we compute new lower and upper bounds for @@ -2964,7 +2986,7 @@ void Layer::computeSymbolicBoundsForSoftmax() } } } - + if ( symbolicLb ) { delete[] symbolicLb; @@ -2974,7 +2996,7 @@ void Layer::computeSymbolicBoundsForSoftmax() { delete[] symbolicUb; symbolicUb = NULL; - } + } if ( _work ) { delete[] _work; @@ -3009,13 +3031,13 @@ void Layer::computeSymbolicBoundsForBilinear() ASSERT( _neuronToActivationSources.exists( i ) ); List sources = getActivationSources( i ); ASSERT( sources.size() == 2 ); - + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - + unsigned sourceLayerSize = sourceLayer->getSize(); const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); - + Vector sourceLbs; Vector sourceUbs; Vector sourceValues; @@ -3028,10 +3050,10 @@ void Layer::computeSymbolicBoundsForBilinear() unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); - - sourceLbs.append( sourceLb - GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - sourceUbs.append( sourceUb + GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS ); - + + sourceLbs.append( sourceLb ); + sourceUbs.append( sourceUb ); + if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) { allConstant = false; @@ -3041,7 +3063,7 @@ void Layer::computeSymbolicBoundsForBilinear() double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); sourceValues.append( sourceValue ); } - + if ( counter == 0 ) { indexA = sourceIndex._neuron; @@ -3052,7 +3074,7 @@ void Layer::computeSymbolicBoundsForBilinear() } ++counter; } - + if ( allConstant ) { // If the both source neurons have been eliminated, this neuron is constant @@ -3061,59 +3083,67 @@ void Layer::computeSymbolicBoundsForBilinear() _symbolicUb[j * _size + i] = 0; _symbolicLb[j * _size + i] = 0; } - + _symbolicUpperBias[i] = sourceValues[0] * 
sourceValues[1]; _symbolicLowerBias[i] = sourceValues[0] * sourceValues[1]; continue; } - + for ( unsigned j = 0; j < _inputLayerSize; ++j ) { _symbolicUb[j * _size + i] = 0; _symbolicLb[j * _size + i] = 0; } - + // Symbolic lower bound: // out >= alpha * x + beta * y + gamma // where alpha = lb_y, beta = lb_x, gamma = -lb_x * lb_y - + // Symbolic upper bound: // out <= alpha * x + beta * y + gamma // where alpha = ub_y, beta = lb_x, gamma = -lb_x * ub_y for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - if (sourceLbs[1] >= 0) - { - _symbolicLb[j * _size + i] += sourceLbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA]; - } - else - { - _symbolicLb[j * _size + i] += sourceLbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA]; - } - - if (sourceUbs[1] >= 0) - { - _symbolicUb[j * _size + i] += sourceUbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA]; - } - else - { - _symbolicLb[j * _size + i] += sourceUbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA]; - } - - if (sourceLbs[0] >= 0) - { - _symbolicLb[j * _size + i] += sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB]; - _symbolicUb[j * _size + i] += sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB]; - } - else - { - _symbolicLb[j * _size + i] += sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB]; - _symbolicUb[j * _size + i] += sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB]; - } + if ( sourceLbs[1] >= 0 ) + { + _symbolicLb[j * _size + i] += + sourceLbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA]; + } + else + { + _symbolicLb[j * _size + i] += + sourceLbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA]; + } + + if ( sourceUbs[1] >= 0 ) + { + _symbolicUb[j * _size + i] += + sourceUbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA]; + } + else + { + _symbolicLb[j * _size + i] += + sourceUbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA]; + } + + if ( sourceLbs[0] >= 0 ) + { + _symbolicLb[j * _size + i] += + sourceLbs[0] * 
sourceSymbolicLb[j * sourceLayerSize + indexB]; + _symbolicUb[j * _size + i] += + sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB]; + } + else + { + _symbolicLb[j * _size + i] += + sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB]; + _symbolicUb[j * _size + i] += + sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB]; + } } _symbolicLowerBias[i] = -sourceLbs[0] * sourceLbs[1]; _symbolicUpperBias[i] = -sourceLbs[0] * sourceUbs[1]; - + double lb = FloatUtils::infinity(); double ub = FloatUtils::negativeInfinity(); List values = { sourceLbs[0] * sourceLbs[1], @@ -3127,7 +3157,7 @@ void Layer::computeSymbolicBoundsForBilinear() if ( v > ub ) ub = v; } - + /* We now have the symbolic representation for the current layer. Next, we compute new lower and upper bounds for @@ -3143,9 +3173,9 @@ void Layer::computeSymbolicBoundsForBilinear() { double inputLb = _layerOwner->getLayer( 0 )->getLb( j ); double inputUb = _layerOwner->getLayer( 0 )->getUb( j ); - + double entry = _symbolicLb[j * _size + i]; - + if ( entry >= 0 ) { _symbolicLbOfLb[i] += ( entry * inputLb ); @@ -3170,7 +3200,7 @@ void Layer::computeSymbolicBoundsForBilinear() _symbolicUbOfUb[i] += ( entry * inputLb ); } } - + /* We now have the tightest bounds we can for the relu variable. 
If they are tigheter than what was previously @@ -3366,16 +3396,17 @@ double Layer::calculateDifferenceFromSymbolic( Map &point, { double lower_sum = _symbolicLowerBias[i]; double upper_sum = _symbolicUpperBias[i]; - + for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - const NeuronIndex neuron( 0, j ); - lower_sum += _symbolicLb[j * _size + i] * point[neuron]; - upper_sum += _symbolicUb[j * _size + i] * point[neuron]; + const NeuronIndex neuron( 0, j ); + lower_sum += _symbolicLb[j * _size + i] * point[neuron]; + upper_sum += _symbolicUb[j * _size + i] * point[neuron]; } - + const NeuronIndex currentNeuron( _layerIndex, i ); - return FloatUtils::max( point[currentNeuron] - upper_sum , 0 ) + FloatUtils::max( lower_sum - point[currentNeuron] , 0 ); + return FloatUtils::max( point[currentNeuron] - upper_sum, 0 ) + + FloatUtils::max( lower_sum - point[currentNeuron], 0 ); } void Layer::computeParameterisedSymbolicBounds( double coeff, bool receive ) @@ -3389,34 +3420,34 @@ void Layer::computeParameterisedSymbolicBounds( double coeff, bool receive ) case SIGN: computeParameterisedSymbolicBoundsForSign( coeff ); break; - + case LEAKY_RELU: computeParameterisedSymbolicBoundsForLeakyRelu( coeff ); break; - + default: computeSymbolicBounds(); break; } - + if ( receive ) { for ( unsigned i = 0; i < _size; ++i ) { - Map lower_polygonal_tightening; - Map upper_polygonal_tightening; - + Map lower_polygonal_tightening; + Map upper_polygonal_tightening; + for ( unsigned j = 0; j < _inputLayerSize; ++j ) { const NeuronIndex neuron( 0, j ); lower_polygonal_tightening.insert( neuron, _symbolicLb[j * _size + i] ); upper_polygonal_tightening.insert( neuron, _symbolicUb[j * _size + i] ); } - - _layerOwner->receivePolygonalTighterBound( - PolygonalTightening( lower_polygonal_tightening, _symbolicLowerBias[i], PolygonalTightening::LB ) ); - _layerOwner->receivePolygonalTighterBound( - PolygonalTightening( upper_polygonal_tightening, _symbolicUpperBias[i], PolygonalTightening::UB ) ); + 
+ _layerOwner->receivePolygonalTighterBound( PolygonalTightening( + lower_polygonal_tightening, _symbolicLowerBias[i], PolygonalTightening::LB ) ); + _layerOwner->receivePolygonalTighterBound( PolygonalTightening( + upper_polygonal_tightening, _symbolicUpperBias[i], PolygonalTightening::UB ) ); } } } @@ -3528,19 +3559,19 @@ void Layer::computeParameterisedSymbolicBoundsForRelu( double coeff ) { for ( unsigned j = 0; j < _inputLayerSize; ++j ) _symbolicLb[j * _size + i] *= coeff; - + _symbolicLowerBias[i] *= coeff; } else - { + { for ( unsigned j = 0; j < _inputLayerSize; ++j ) _symbolicLb[j * _size + i] = _symbolicLb[j * _size + i] * _symbolicUbOfLb[i] / ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] ); - + _symbolicLowerBias[i] = _symbolicLowerBias[i] * _symbolicUbOfLb[i] / ( _symbolicUbOfLb[i] - _symbolicLbOfLb[i] ); } - + _symbolicLbOfLb[i] = 0; } else @@ -3673,7 +3704,7 @@ void Layer::computeParameterisedSymbolicBoundsForSign( double coeff ) { PhaseStatus upperSignPhase = PHASE_NOT_FIXED; PhaseStatus lowerSignPhase = PHASE_NOT_FIXED; - + // If we got here, we know that lbLb < 0 and ubUb // > 0 @@ -3687,7 +3718,7 @@ void Layer::computeParameterisedSymbolicBoundsForSign( double coeff ) _symbolicUb[j * _size + i] = 0; _symbolicUpperBias[i] = 1; - + upperSignPhase = SIGN_PHASE_POSITIVE; } else @@ -3734,8 +3765,8 @@ void Layer::computeParameterisedSymbolicBoundsForSign( double coeff ) _symbolicLowerBias[i] *= factor; _symbolicLowerBias[i] -= 1; } - - if (upperSignPhase == PHASE_NOT_FIXED) + + if ( upperSignPhase == PHASE_NOT_FIXED ) { _symbolicUbOfUb[i] = 1; _symbolicLbOfUb[i] = -1; @@ -3745,8 +3776,8 @@ void Layer::computeParameterisedSymbolicBoundsForSign( double coeff ) _symbolicUbOfUb[i] = 1; _symbolicLbOfUb[i] = 1; } - - if (lowerSignPhase == PHASE_NOT_FIXED) + + if ( lowerSignPhase == PHASE_NOT_FIXED ) { _symbolicUbOfLb[i] = 1; _symbolicLbOfLb[i] = -1; @@ -3807,7 +3838,7 @@ void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ) { ASSERT( _alpha 
> 0 && _alpha < 1 ); ASSERT( coeff >= 0 && coeff <= 1 ); - + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); @@ -3898,11 +3929,11 @@ void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ) { _symbolicUb[j * _size + i] *= weight; } - + // Do the same for the bias, and then adjust _symbolicUpperBias[i] *= weight; _symbolicUpperBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; - + // For the lower bound, in general, x_f >= lambda * x_b, where // 0 <= lambda <= 1, would be a sound lower bound. We @@ -3914,20 +3945,20 @@ void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ) // lambda = 1 // Symbolic lower bound: x_f >= x_b // Concrete lower bound: x_f >= sourceLb - + // Lower bounds are passed as is } else { - // lambda = ((1 - alpha) * coeff + alpha) (varies continuously between lambda = alpha and lambda = 1). - // Symbolic lower bound: x_f >= ((1 - coeff) * weight + alpha) x_b - // Concrete lower bound: x_f >= 0 - + // lambda = ((1 - alpha) * coeff + alpha) (varies continuously between lambda = + // alpha and lambda = 1). Symbolic lower bound: x_f >= ((1 - coeff) * weight + + // alpha) x_b Concrete lower bound: x_f >= 0 + for ( unsigned j = 0; j < _inputLayerSize; ++j ) { _symbolicLb[j * _size + i] *= ( 1 - _alpha ) * coeff + _alpha; } - + _symbolicLowerBias[i] *= _alpha; } } @@ -3937,7 +3968,7 @@ void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ) { _symbolicLb[j * _size + i] *= weight; } - + // Do the same for the bias, and then adjust _symbolicLowerBias[i] *= weight; _symbolicLowerBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; @@ -3952,11 +3983,11 @@ void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ) { _symbolicUb[j * _size + i] *= _alpha; } - + _symbolicUpperBias[i] *= _alpha; } } - + /* We now have the symbolic representation for the current layer. 
Next, we compute new lower and upper bounds for @@ -3972,9 +4003,9 @@ void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ) { double inputLb = _layerOwner->getLayer( 0 )->getLb( j ); double inputUb = _layerOwner->getLayer( 0 )->getUb( j ); - + double entry = _symbolicLb[j * _size + i]; - + if ( entry >= 0 ) { _symbolicLbOfLb[i] += ( entry * inputLb ); @@ -3985,9 +4016,9 @@ void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ) _symbolicLbOfLb[i] += ( entry * inputUb ); _symbolicUbOfLb[i] += ( entry * inputLb ); } - + entry = _symbolicUb[j * _size + i]; - + if ( entry >= 0 ) { _symbolicLbOfUb[i] += ( entry * inputLb ); @@ -4025,7 +4056,7 @@ void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ) _symbolicUpperBias[i] *= _alpha; } } - + if ( _symbolicUbOfUb[i] > sourceUb ) _symbolicUbOfUb[i] = sourceUb; if ( _symbolicLbOfLb[i] < _alpha * sourceLb ) @@ -4053,9 +4084,9 @@ void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ) } double Layer::softmaxLSELowerBound( const Vector &inputs, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) { double sum = 0; for ( unsigned j = 0; j < inputs.size(); ++j ) @@ -4071,10 +4102,10 @@ double Layer::softmaxLSELowerBound( const Vector &inputs, } double Layer::softmaxdLSELowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) { double val = 0; if ( i == di ) @@ -4101,9 +4132,9 @@ double Layer::softmaxdLSELowerBound( const Vector &inputMids, } double Layer::softmaxLSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) { double max = FloatUtils::negativeInfinity(); unsigned maxInputIndex = 0; @@ -4143,10 +4174,10 @@ 
double Layer::softmaxLSELowerBound2( const Vector &inputMids, } double Layer::softmaxdLSELowerBound2( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) { double max = FloatUtils::negativeInfinity(); unsigned maxInputIndex = 0; @@ -4217,9 +4248,9 @@ double Layer::softmaxdLSELowerBound2( const Vector &inputMids, } double Layer::softmaxLSEUpperBound( const Vector &inputs, - const Vector &outputLb, - const Vector &outputUb, - unsigned i ) + const Vector &outputLb, + const Vector &outputUb, + unsigned i ) { double li = outputLb[i]; double ui = outputUb[i]; @@ -4233,10 +4264,10 @@ double Layer::softmaxLSEUpperBound( const Vector &inputs, } double Layer::softmaxdLSEUpperbound( const Vector &inputMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned i, - unsigned di ) + const Vector &outputLb, + const Vector &outputUb, + unsigned i, + unsigned di ) { double li = outputLb[i]; double ui = outputUb[i]; @@ -4251,9 +4282,9 @@ double Layer::softmaxdLSEUpperbound( const Vector &inputMids, } double Layer::softmaxERLowerBound( const Vector &inputs, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i ) { Vector inputTilda; SoftmaxConstraint::xTilda( inputs, inputs[i], inputTilda ); @@ -4278,10 +4309,10 @@ double Layer::softmaxERLowerBound( const Vector &inputs, } double Layer::softmaxdERLowerBound( const Vector &inputMids, - const Vector &inputLbs, - const Vector &inputUbs, - unsigned i, - unsigned di ) + const Vector &inputLbs, + const Vector &inputUbs, + unsigned i, + unsigned di ) { double val = softmaxERLowerBound( inputMids, inputLbs, inputUbs, i ); @@ -4309,9 +4340,9 @@ double Layer::softmaxdERLowerBound( const Vector &inputMids, } double Layer::softmaxERUpperBound( const Vector &inputs, - const Vector &outputLb, - const Vector &outputUb, - 
unsigned i ) + const Vector &outputLb, + const Vector &outputUb, + unsigned i ) { double li = outputLb[i]; double ui = outputUb[i]; @@ -4323,10 +4354,10 @@ double Layer::softmaxERUpperBound( const Vector &inputs, } double Layer::softmaxdERUpperBound( const Vector &inputMids, - const Vector &outputLb, - const Vector &outputUb, - unsigned i, - unsigned di ) + const Vector &outputLb, + const Vector &outputUb, + unsigned i, + unsigned di ) { double li = outputLb[i]; double ui = outputUb[i]; @@ -4343,8 +4374,8 @@ double Layer::softmaxdERUpperBound( const Vector &inputMids, } double Layer::softmaxLinearLowerBound( const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) + const Vector &inputUbs, + unsigned i ) { Vector uTilda; SoftmaxConstraint::xTilda( inputUbs, inputLbs[i], uTilda ); @@ -4353,8 +4384,8 @@ double Layer::softmaxLinearLowerBound( const Vector &inputLbs, } double Layer::softmaxLinearUpperBound( const Vector &inputLbs, - const Vector &inputUbs, - unsigned i ) + const Vector &inputUbs, + unsigned i ) { Vector lTilda; SoftmaxConstraint::xTilda( inputLbs, inputUbs[i], lTilda ); diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h index aa318e9181..ee9e1aba05 100644 --- a/src/nlr/Layer.h +++ b/src/nlr/Layer.h @@ -140,7 +140,7 @@ class Layer void computeParameterisedSymbolicBounds( double coeff, bool receive = false ); double calculateDifferenceFromSymbolic( Map &point, unsigned i ) const; void computeIntervalArithmeticBounds(); - + /* Preprocessing functionality: variable elimination and reindexing */ @@ -209,7 +209,7 @@ class Layer void allocateMemory(); void freeMemoryIfNeeded(); - + /* The following methods compute concrete softmax output bounds using different linear approximation, as well as the coefficients @@ -225,12 +225,12 @@ class Layer const Vector &inputUbs, unsigned i, unsigned di ); - + double softmaxLSELowerBound2( const Vector &inputMids, const Vector &inputLbs, const Vector &inputUbs, unsigned i ); - + double softmaxdLSELowerBound2( const 
Vector &inputMids, const Vector &inputLbs, const Vector &inputUbs, @@ -258,26 +258,26 @@ class Layer const Vector &inputUbs, unsigned i, unsigned di ); - + double softmaxERUpperBound( const Vector &inputs, const Vector &outputLb, const Vector &outputUb, unsigned i ); - + double softmaxdERUpperBound( const Vector &inputMids, const Vector &outputLb, const Vector &outputUb, unsigned i, unsigned di ); - + double softmaxLinearLowerBound( const Vector &inputLbs, const Vector &inputUbs, unsigned i ); - + double softmaxLinearUpperBound( const Vector &inputLbs, const Vector &inputUbs, unsigned i ); - + /* Helper functions for symbolic bound tightening */ @@ -293,8 +293,8 @@ class Layer void computeSymbolicBoundsForSoftmax(); void computeSymbolicBoundsForBilinear(); void computeSymbolicBoundsDefault(); - - + + /* Helper functions for parameterised symbolic bound tightening */ diff --git a/src/nlr/LayerOwner.h b/src/nlr/LayerOwner.h index 122c6a216a..b8ae03cb0f 100644 --- a/src/nlr/LayerOwner.h +++ b/src/nlr/LayerOwner.h @@ -17,8 +17,8 @@ #define __LayerOwner_h__ #include "ITableau.h" -#include "Tightening.h" #include "PolygonalTightening.h" +#include "Tightening.h" namespace NLR { diff --git a/src/nlr/MILPSolverBoundTighteningType.h b/src/nlr/MILPSolverBoundTighteningType.h new file mode 100644 index 0000000000..760f82e8d1 --- /dev/null +++ b/src/nlr/MILPSolverBoundTighteningType.h @@ -0,0 +1,46 @@ +/********************* */ +/*! \file MILPSolverBoundTighteningType.h +** \verbatim +** Top contributors (to current version): +** Guy Katz +** This file is part of the Marabou project. +** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS +** in the top-level source directory) and their institutional affiliations. +** All rights reserved. 
See the file COPYING in the top-level source +** directory for licensing information.\endverbatim +** +** [[ Add lengthier description here ]] + +**/ + +#ifndef __MILPSolverBoundTighteningType_h__ +#define __MILPSolverBoundTighteningType_h__ + +/* + MILP solver bound tightening options +*/ +enum class MILPSolverBoundTighteningType { + // Only encode pure linear constraints in the underlying + // solver, in a way that over-approximates the query + LP_RELAXATION = 0, + LP_RELAXATION_INCREMENTAL = 1, + // Encode linear and integer constraints in the underlying + // solver, in a way that completely captures the query but is + // more expensive to solve + MILP_ENCODING = 2, + MILP_ENCODING_INCREMENTAL = 3, + // Encode full queries and tries to fix relus until fix point + ITERATIVE_PROPAGATION = 4, + // Perform backward analysis + BACKWARD_ANALYSIS_ONCE = 5, + BACKWARD_ANALYSIS_CONVERGE = 6, + // Perform backward analysis using the INVPROP Algorithm (arXiv:2302.01404v4 [cs.LG]) + BACKWARD_ANALYSIS_INVPROP = 7, + // Perform backward analysis using the PreimageApproximation Algorithm (arXiv:2305.03686v4 + // [cs.SE]) + BACKWARD_ANALYSIS_PREIMAGE_APPROX = 8, + // Option to have no MILP bound tightening performed + NONE = 10, +}; + +#endif // __MILPSolverBoundTighteningType_h__ diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index d41ed22de4..2930770c6d 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -203,7 +203,8 @@ void NetworkLevelReasoner::receivePolygonalTighterBound( PolygonalTightening pol _polygonalBoundTightenings.append( polygonal_tightening ); } -void NetworkLevelReasoner::getConstraintPolygonalTightenings( List &polygonal_tightenings ) +void NetworkLevelReasoner::getConstraintPolygonalTightenings( + List &polygonal_tightenings ) { polygonal_tightenings = _polygonalBoundTightenings; _polygonalBoundTightenings.clear(); diff --git a/src/nlr/NetworkLevelReasoner.h 
b/src/nlr/NetworkLevelReasoner.h index f84f0f12ea..f6bf6cf72f 100644 --- a/src/nlr/NetworkLevelReasoner.h +++ b/src/nlr/NetworkLevelReasoner.h @@ -103,7 +103,7 @@ class NetworkLevelReasoner : public LayerOwner expressions and obtain tighter bounds (e.g., if the upper bound on the upper bound of a ReLU node is negative, that ReLU is inactive and its output can be set to 0. - + - Parametrised Symbolic: For certain activation functions, there is a continuum of valid symbolic bounds. We receive a map of coefficients in range [0, 1] for every layer index, then compute @@ -121,10 +121,10 @@ class NetworkLevelReasoner : public LayerOwner - getConstraintTightenings: this is the function that an external user calls in order to collect the tighter bounds discovered by the NLR. - + - receiveTighterPolygonalBound: this is a callback from the layer objects, through which they report tighter polygonal bounds. - + - getConstraintPolygonalTightenings: this is the function that an external user calls in order to collect the tighter polygonal bounds discovered by the NLR. @@ -148,7 +148,7 @@ class NetworkLevelReasoner : public LayerOwner void receiveTighterBound( Tightening tightening ); void getConstraintTightenings( List &tightenings ); void clearConstraintTightenings(); - + void receivePolygonalTighterBound( PolygonalTightening polygonal_tightening ); void getConstraintPolygonalTightenings( List &polygonal_tightenings ); void clearConstraintPolygonalTightenings(); diff --git a/src/nlr/PolygonalTightening.h b/src/nlr/PolygonalTightening.h new file mode 100644 index 0000000000..0f811f6fe6 --- /dev/null +++ b/src/nlr/PolygonalTightening.h @@ -0,0 +1,100 @@ +/********************* */ +/*! \file PolygonalTightening.h + ** \verbatim + ** Top contributors (to current version): + ** Duligur Ibeling, Guy Katz, Ido Shmuel + ** This file is part of the Marabou project. 
+ ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS + ** in the top-level source directory) and their institutional affiliations. + ** All rights reserved. See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** [[ Add lengthier description here ]] + +**/ + +#ifndef __PolygonalTightening_h__ +#define __PolygonalTightening_h__ + +#include "Map.h" +#include "NeuronIndex.h" + +#include + +class PolygonalTightening +{ +public: + enum PolygonalBoundType { + LB = 0, + UB = 1, + }; + + PolygonalTightening( Map neuronToCoefficient, + double value, + PolygonalBoundType type ) + : _neuronToCoefficient( neuronToCoefficient ) + , _value( value ) + , _type( type ) + { + } + + /* + The coefficient of each neuron. + */ + Map _neuronToCoefficient; + + /* + Its new value. + */ + double _value; + + /* + Whether the tightening tightens the + lower bound or the upper bound. + */ + PolygonalBoundType _type; + + /* + Equality operator. + */ + bool operator==( const PolygonalTightening &other ) const + { + bool allFound = true; + for ( const auto &pair : _neuronToCoefficient ) + { + bool currentFound = false; + for ( const auto &otherPair : other._neuronToCoefficient ) + { + currentFound |= ( pair.first._layer == otherPair.first._layer && + pair.first._neuron == otherPair.first._neuron && + pair.second == otherPair.second ); + } + allFound &= currentFound; + } + bool result = allFound && _value == other._value && _type == other._type; + return result; + } + + void dump() const + { + printf( "PolygonalTightening: x %s %.2lf\n", _type == LB ? ">=" : "<=", _value ); + + for ( const auto &pair : _neuronToCoefficient ) + { + printf( "NeuronIndex: (layer %u, neuron %u), Coefficient: %.2lf )\n", + pair.first._layer, + pair.first._neuron, + pair.second ); + } + } +}; + +#endif // __PolygonalTightening_h__ + +// +// Local Variables: +// compile-command: "make -C ../.. 
" +// tags-file-name: "../../TAGS" +// c-basic-offset: 4 +// End: +// s diff --git a/src/nlr/Test_NetworkLevelReasoner.h b/src/nlr/Test_NetworkLevelReasoner.h new file mode 100644 index 0000000000..977faa4ed6 --- /dev/null +++ b/src/nlr/Test_NetworkLevelReasoner.h @@ -0,0 +1,12348 @@ +/********************* */ +/*! \file Test_NetworkLevelReasoner.h + ** \verbatim + ** Top contributors (to current version): + ** Guy Katz, Andrew Wu + ** This file is part of the Marabou project. + ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS + ** in the top-level source directory) and their institutional affiliations. + ** All rights reserved. See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** [[ Add lengthier description here ]] + +**/ + +#include "../../engine/tests/MockTableau.h" // TODO: fix this +#include "DeepPolySoftmaxElement.h" +#include "FloatUtils.h" +#include "Layer.h" +#include "NetworkLevelReasoner.h" +#include "Options.h" +#include "Query.h" +#include "Tightening.h" +#include "Vector.h" + +#include + +class MockForNetworkLevelReasoner +{ +public: +}; + +class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite +{ +public: + MockForNetworkLevelReasoner *mock; + + void setUp() + { + TS_ASSERT( mock = new MockForNetworkLevelReasoner ); + } + + void tearDown() + { + TS_ASSERT_THROWS_NOTHING( delete mock ); + } + + void populateNetwork( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + 
nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithSigmoids( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark 
layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithAbs( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 
); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithSign( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ 
+ + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithRound( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Round sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + 
nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithLeakyRelu( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( 
NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + + void populateNetworkWithMax( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x b e + g + y c f + d + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::MAX, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::MAX, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -2 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 2 ); + nlr.setWeight( 0, 1, 1, 3, -3 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 0, 2 ); + + // Mark the Max sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + } + + void populateNetworkWithSoftmax( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SOFTMAX, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Softmax sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 
2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 0, 4, 1 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithBilinear( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x b e + g + y c f + d + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::BILINEAR, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -2 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 2 ); + nlr.setWeight( 0, 1, 1, 3, -3 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 
0, 1 ); + nlr.setBias( 3, 0, 2 ); + + // Mark the Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + } + + void populateNetworkWithAbsAndRelu( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -5 ); + + nlr.setWeight( 4, 0, 5, 
0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Abs/ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithRoundAndSign( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + 
nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Round/Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithLeakyReluAndSigmoid( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d f + b + y e g + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum 
layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the LeakyReLU/Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + } + + void populateNetworkWithSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d + b f + y e + c + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::MAX, 1 ); + nlr.addLayer( 5, 
NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Softmax/Max sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + } + + void populateNetworkWithReluAndBilinear( NLR::NetworkLevelReasoner &nlr ) + { + /* + a + x d + b f + y e + c + */ + + // Create the layers + nlr.addLayer( 
0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the ReLU/Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + } + + void populateNetworkSBTRelu( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + 2 R 1 + x0 --- x2 ---> x4 --- x6 + \ / / + 1 \ / / + \/ -1 / + /\ / + 3 
/ \ / + / \ R / + x1 --- x3 ---> x5 + 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 7 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + } + + void populateNetworkSBTReluResidual1( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + -1 + __________________ + / \ + / 1 R -1 1 R 3 1 + x0 --- x1 ---> x2 --- x3 ---> x4 --- x5 + \ / + \ 3 / + \________________________/ + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 1 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 1 ); + 
nlr.addLayer( 2, NLR::Layer::RELU, 1 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 4, NLR::Layer::RELU, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 1, 5 ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 4, 0, 5, 0, 3 ); + nlr.setWeight( 0, 0, 3, 0, -1 ); + nlr.setWeight( 1, 0, 5, 0, 3 ); + + + nlr.setBias( 3, 0, 1 ); + nlr.setBias( 5, 0, 1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 5 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 6 ); + tableau.setLowerBound( 1, -large ); + tableau.setUpperBound( 1, large ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + } + + void populateNetworkSBTReluResidual2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + -1 + __________________ + / \ + / 1 R -1 1 R 3 1 1 + x0 --- x1 ---> x2 --- x3 ---> x4 --- x5 --- x6 + \ / + \ 1 / + \_______________________________/ + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 1 ); + nlr.addLayer( 1, 
NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 2, NLR::Layer::RELU, 1 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 4, NLR::Layer::RELU, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + nlr.addLayer( 6, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 6; ++i ) + nlr.addLayerDependency( i - 1, i ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 0, 5 ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 4, 0, 5, 0, 3 ); + nlr.setWeight( 0, 0, 3, 0, -1 ); + nlr.setWeight( 0, 0, 5, 0, 1 ); + nlr.setWeight( 5, 0, 6, 0, 1 ); + + nlr.setBias( 3, 0, 1 ); + nlr.setBias( 5, 0, 1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 7 ); + tableau.setLowerBound( 1, -large ); + tableau.setUpperBound( 1, large ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + } + + void populateNetworkSBTReluReindex( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 1 1 1 + x0 --- x2 
x5 --- x6 x9 --- x10 + \ /\ /\ / \ / \ / + 1 \ / R\ /-1\ / R \ / 1 \ / + \/ \/ \/ \/ \/ + /\ /\ /\ /\ /\ + 1 / \ R/ \ 1/ \ R / \ 1 / \ + / \/ \/ \ / \ / 0 \ + x1 --- x3 x4 --- x7 x8 --- x11 + -1 1 + + The example described in Fig. 3 of + https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 0 ); + + nlr.setBias( 5, 0, 1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + + nlr.addActivationSource( 3, 0, 4, 1 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 8 
); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkSBTLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 LR 1 LR 1 1 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 1 \ / 0 \ / + \/ \/ \/ + /\ /\ /\ + 1 / \ 1 / \ 1 / \ + / \ LR / \ LR / 1 \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + -1 -1 + + The example described in Fig. 
3 of + https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf + using LeakyReLU activation instead of ReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.2 ); + nlr.getLayer( 4 )->setAlpha( 0.2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, 1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 0 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + + nlr.setBias( 5, 0, 1 ); + + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( 
NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkSBTSigmoidsAndRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 S 1 Rd + x0 --- x2 ---> x4 --- x6 --- x8 + \ / \ / + 1 \ / 1 \ / + \/ \/ + /\ /\ + 1 / \ 1 / \ + / \ S / \ Rd + x1 --- x3 ---> x5 --- x7 --- x9 + -1 -1 + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 4; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, 1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + // Mark the Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 
1 ); + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 10 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + } + + void populateNetworkSBTMax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 R Max 2 + x0 --- x2 ---> x4 --- x6 ---> x7 + \ / / + 1 \ / / + \/ / + /\ / + 1 / \ / + / \ R / + x1 --- x3 ---> x5 + -1 + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::MAX, 1 ); + nlr.addLayer( 4, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 4; ++i ) + nlr.addLayerDependency( i - 1, i ); + + 
// Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, -1 ); + nlr.setWeight( 3, 0, 4, 0, 2 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Mark the Max sources + nlr.addActivationSource( 2, 0, 3, 0 ); + nlr.addActivationSource( 2, 1, 3, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 7 ); + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 8 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + } + + void populateNetworkSBTSoftmax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + + x0 x3 S x6 + + x1 x4 S x7 + + x2 x5 S x8 + + x3 = x0 - x1 + x2 + 1 + x4 = -x0 + x1 + x2 + 2 + x5 = -x0 - x1 - x2 + 3 + + x6 x7 x8 = softmax(x3, x4, x5) + + x9 = x6 + x7 + x8 + x10 = x6 + x7 + x8 + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, 
NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -1 ); + nlr.setWeight( 0, 0, 1, 2, -1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 2, -1 ); + nlr.setWeight( 0, 2, 1, 0, 1 ); + nlr.setWeight( 0, 2, 1, 1, 1 ); + nlr.setWeight( 0, 2, 1, 2, -1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 1, 1, 2 ); + nlr.setBias( 1, 2, 3 ); + + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 8 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 11 ); + tableau.setLowerBound( 3, -large ); + 
tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + } + + void populateNetworkSBTSoftmax2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + + x0 x3 S x8 + + x1 x4 S x9 + + x2 x5 S x10 + + x6 S x11 + + x7 S x12 + + x3 = x0 - x1 + x2 + 1 + x4 = -x0 + x1 + x2 + 2 + x5 = -x0 - x1 - x2 + 3 + x6 = -x0 - x1 - x2 + 2 + x7 = -x0 - x1 - x2 + 1 + + x8 x10 x12 = softmax(x3, x5, x7) + + x9 x11 = softmax(x4, x6) + + x13 = x8 + x10 + x12 + x14 = -x8 - x10 - x12 + x15 = x9 + x11 + x16 = -x9 - x11 + + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 5 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 5 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 4 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, -1 ); + nlr.setWeight( 0, 0, 1, 2, -1 ); + nlr.setWeight( 0, 0, 1, 3, -1 ); + nlr.setWeight( 0, 0, 1, 4, -1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 2, -1 ); + nlr.setWeight( 0, 1, 1, 3, -1 ); + nlr.setWeight( 0, 1, 1, 4, -1 ); + nlr.setWeight( 0, 2, 1, 0, 1 ); + nlr.setWeight( 0, 2, 1, 1, 1 ); + nlr.setWeight( 0, 2, 1, 2, -1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + nlr.setWeight( 0, 2, 1, 4, -1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + 
nlr.setWeight( 2, 4, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + nlr.setWeight( 2, 4, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 2, 1 ); + nlr.setWeight( 2, 3, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 3, -1 ); + nlr.setWeight( 2, 3, 3, 3, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 1, 1, 2 ); + nlr.setBias( 1, 2, 3 ); + nlr.setBias( 1, 3, 2 ); + nlr.setBias( 1, 4, 1 ); + + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 4, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 4, 2, 2 ); + nlr.addActivationSource( 1, 0, 2, 4 ); + nlr.addActivationSource( 1, 2, 2, 4 ); + nlr.addActivationSource( 1, 4, 2, 4 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 3 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 4 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 4 ), 12 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 13 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 3 ), 16 ); + + // Very loose bounds for neurons except inputs + double large = 
1000000; + + tableau.getBoundManager().initialize( 17 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + } + + void populateNetworkSBTBilinear( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + + x0 x2 + x x4 -- x5 + x1 x3 + + x2 = x0 - 2 * x1 + x3 = x0 + x1 + x4 = -x5 + + x4 = x2 * x3 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -2 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, -1 ); + + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + + + // Variable indexing + 
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 5 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 6 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + } + + void populateNetworkBackwardReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 R -1 R -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ R / \ R / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 
2 of + https://dl.acm.org/doi/10.1145/3563325 + using ReLU activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + 
// Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardReLU2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = ReLU( x3 ) + x8 = ReLU( x4 ) + x9 = ReLU( x5 ) + x10 = ReLU( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = ReLU( x11 ) + x15 = ReLU( x12 ) + x16 = ReLU( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = ReLU( x17 ) + x20 = ReLU( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::RELU, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::RELU, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // 
Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 
8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + 
tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardSigmoid( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 S -1 S -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ S / \ S / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 2 of + https://dl.acm.org/doi/10.1145/3563325 + using Sigmoid activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardSigmoid2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Sigmoid( x3 ) + x8 = Sigmoid( x4 ) + x9 = Sigmoid( x5 ) + x10 = Sigmoid( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Sigmoid( x11 ) + x15 = Sigmoid( x12 ) 
+ x16 = Sigmoid( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Sigmoid( x17 ) + x20 = Sigmoid( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::SIGMOID, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + 
nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + 
tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardSign( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 Sign -1 Sign -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ Sign / \ Sign / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 
2 of + https://dl.acm.org/doi/10.1145/3563325 + using Sign activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + 
tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardSign2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Sign( x3 ) + x8 = Sign( x4 ) + x9 = SIgn( x5 ) + x10 = Sign( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Sign( x11 ) + x15 = Sign( x12 ) + x16 = Sign( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Sign( x17 ) + x20 = Sign( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 
1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + 
tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 Rnd -1 Rnd -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ Rnd / \ Rnd / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 2 of + https://dl.acm.org/doi/10.1145/3563325 + using Round activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the Round sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( 
NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardRound2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Round( x3 ) + x8 = Round( x4 ) + x9 = Round( x5 ) + x10 = Round( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Round( x11 ) + x15 = Round( x12 ) + x16 = Round( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Round( x17 ) 
+ x20 = Round( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the Round sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + 
nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 
10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardAbs( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 A -1 A -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ A / \ A / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 
2 of + https://dl.acm.org/doi/10.1145/3563325 + using Absolute value activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + 
double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardAbs2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Abs( x3 ) + x8 = Abs( x4 ) + x9 = Abs( x5 ) + x10 = Abs( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Abs( x11 ) + x15 = Abs( x12 ) + x16 = Abs( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Abs( x17 ) + x20 = Abs( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases 
for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + 
tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 LR -1 LR -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ LR / \ LR / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 2 of + https://dl.acm.org/doi/10.1145/3563325 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + 
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardLeakyRelu2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = LeakyReLU( x3 ) + x8 = LeakyReLU( x4 ) + x9 = LeakyReLU( x5 ) + x10 = LeakyReLU( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = 
x7 - x8 + 2x9 - 2 + + x14 = LeakyReLU( x11 ) + x15 = LeakyReLU( x12 ) + x16 = LeakyReLU( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = LeakyReLU( x17 ) + x20 = LeakyReLU( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); + nlr.getLayer( 6 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the LeakyReLU sources + nlr.addActivationSource( 
1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + 
tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + a a' + x e + b b' f h + y d + c c' + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::MAX, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + 
nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Softmax/Max sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + 
tableau.setLowerBound( 7, -large );
+ tableau.setUpperBound( 7, large );
+ tableau.setLowerBound( 8, -large );
+ tableau.setUpperBound( 8, large );
+ tableau.setLowerBound( 9, -large );
+ tableau.setUpperBound( 9, large );
+ tableau.setLowerBound( 10, -large );
+ tableau.setUpperBound( 10, large );
+ tableau.setLowerBound( 11, -large );
+ tableau.setUpperBound( 11, large );
+ }
+
+
+ void populateNetworkBackwardSoftmaxAndMax2( NLR::NetworkLevelReasoner &nlr,
+ MockTableau &tableau )
+ {
+ /*
+ x3 x7
+ x0 x11 x14
+ x4 x8
+ x1 x12 x15 x17
+ x5 x9
+ x2 x13 x16
+ x6 x10
+
+ x3 = -x0 + x1
+ x4 = x0 + 2*x1
+ x5 = x0 + x1 + x2
+ x6 = 3*x0 - 2*x1 - x2
+
+ x7, x8, x9, x10 = Softmax( x3, x4, x5, x6 )
+
+ x11 = 2x7 + x9 - x10 + 2
+ x12 = -x7 + 2x8 + x10
+ x13 = x7 - x8 + 2x9 - 2
+
+ x14, x15, x16 = Softmax( x11, x12, x13 )
+
+ x17 = Max( x14, x15, x16 )
+ */
+
+ // Create the layers
+ nlr.addLayer( 0, NLR::Layer::INPUT, 3 );
+ nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 );
+ nlr.addLayer( 2, NLR::Layer::SOFTMAX, 4 );
+ nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 );
+ nlr.addLayer( 4, NLR::Layer::SOFTMAX, 3 );
+ nlr.addLayer( 5, NLR::Layer::MAX, 1 );
+
+ // Mark layer dependencies
+ for ( unsigned i = 1; i <= 5; ++i )
+ nlr.addLayerDependency( i - 1, i );
+
+ // Set the weights and biases for the weighted sum layers
+ nlr.setWeight( 0, 0, 1, 0, -1 );
+ nlr.setWeight( 0, 0, 1, 1, 1 );
+ nlr.setWeight( 0, 0, 1, 2, 1 );
+ nlr.setWeight( 0, 0, 1, 3, 3 );
+ nlr.setWeight( 0, 1, 1, 0, 1 );
+ nlr.setWeight( 0, 1, 1, 1, 2 );
+ nlr.setWeight( 0, 1, 1, 2, 1 );
+ nlr.setWeight( 0, 1, 1, 3, -2 );
+ nlr.setWeight( 0, 2, 1, 2, 1 );
+ nlr.setWeight( 0, 2, 1, 3, -1 );
+
+ nlr.setWeight( 2, 0, 3, 0, 2 );
+ nlr.setWeight( 2, 0, 3, 1, -1 );
+ nlr.setWeight( 2, 0, 3, 2, 1 );
+ nlr.setWeight( 2, 1, 3, 1, 2 );
+ nlr.setWeight( 2, 1, 3, 2, -1 );
+ nlr.setWeight( 2, 2, 3, 0, 1 );
+ nlr.setWeight( 2, 2, 3, 2, 2 );
+ nlr.setWeight( 2, 3, 3, 0, -1 );
+ nlr.setWeight( 2, 3, 3, 1, 1 );
+
+ nlr.setBias( 
3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + + // Mark the Softmax/Max sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 3, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 2 ); + nlr.addActivationSource( 1, 0, 2, 3 ); + nlr.addActivationSource( 1, 1, 2, 3 ); + nlr.addActivationSource( 1, 2, 2, 3 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + nlr.addActivationSource( 3, 2, 4, 0 ); + nlr.addActivationSource( 3, 0, 4, 1 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 1 ); + nlr.addActivationSource( 3, 0, 4, 2 ); + nlr.addActivationSource( 3, 1, 4, 2 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 4, 0, 5, 0 ); + nlr.addActivationSource( 4, 1, 5, 0 ); + nlr.addActivationSource( 4, 2, 5, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 18 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + } + + void populateNetworkBackwardReluAndBilinear( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + a a' + x e + b b' f h + y d + c c' + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 5, 
NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the ReLU/Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + 
tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardReluAndBilinear2( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + x3 x7 + x0 + x4 x8 x11 x13 + x1 x15 + x5 x9 x12 x14 + x2 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = ReLU( x3 ) + x8 = ReLU( x4 ) + x9 = ReLU( x5 ) + x10 = ReLU( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + + x13 = ReLU( x11 ) + x14 = ReLU( x12 ) + + x15 = x13 * x14 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::RELU, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::BILINEAR, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setBias( 3, 0, 2 ); + 
+ // Mark the ReLU/Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + nlr.addActivationSource( 4, 0, 5, 0 ); + nlr.addActivationSource( 4, 1, 5, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 13 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 14 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 15 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 16 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + 
tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + } + + void test_evaluate_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetwork( nlr ); + + double input[2]; + double output[2]; + + // With ReLUs, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With ReLUs, case 1 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 1 ) ); + + // With ReLUs, case 2 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0 ) ); + } + + void test_evaluate_sigmoids() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSigmoids( nlr ); + + double input[2]; + double output[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.6750, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 3.0167, 0.0001 ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.6032, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.5790, 0.0001 ) ); + + // case 3 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( 
nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.5045, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.1957, 0.0001 ) ); + } + + void test_evaluate_abs() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbs( nlr ); + + double input[2]; + double output[2]; + + // With Abs, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Abs, case 1 + input[0] = -2; + input[1] = -2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Abs, case 2 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 10 ) ); + } + + void test_evaluate_sign() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSign( nlr ); + + double input[2]; + double output[2]; + + // With Sign, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Sign, case 1 + input[0] = -2; + input[1] = -2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Sign, case 2 (0 considered "non-negative", sign(0)=1) + input[0] = -1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4 ) ); + } + + void test_evaluate_round() + { + NLR::NetworkLevelReasoner 
nlr; + + populateNetworkWithRound( nlr ); + + double input[2]; + double output[2]; + + // With Round, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Round, case 1 + input[0] = 2.1; + input[1] = 1.4; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 2, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); + + // With Round, case 2 + input[0] = 2.1; + input[1] = 1.6; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -12, 0.0001 ) ); + } + + void test_evaluate_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithLeakyRelu( nlr ); + + double input[2]; + double output[2]; + + // With Leaky ReLU, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + + // With Leaky ReLU, case 1 (alpha=0.1) + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.9, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0.57, 0.0001 ) ); + + // With Leaky ReLU, case 2 + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -0.04, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -0.76, 0.0001 ) ); + } + + void test_evaluate_max() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithMax( nlr ); + + double input[2]; + double output[1]; + + // With Max, Inputs are zeros, only biases count + input[0] 
= 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -3 ) ); + + // With Max, case 1 + input[0] = 1; + input[1] = -3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -18 ) ); + + // With Max, case 2 + input[0] = -3; + input[1] = 3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -5 ) ); + } + + + void test_evaluate_softmax() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmax( nlr ); + + double input[2]; + double output[2]; + + // With Softmax, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.2999, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.4001, 0.0001 ) ); + + // With Softmax, case 1 + input[0] = 1; + input[1] = -3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.7615, 0.0001 ) ); + + // With Softmax, case 2 + input[0] = -3; + input[1] = 3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.1206, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2.7588, 0.0001 ) ); + } + + void test_evaluate_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithBilinear( nlr ); + + double input[2]; + double output[1]; + + // With Bilinear, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + + // With Bilinear, case 1 + input[0] = 0.1; + input[1] = -0.3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 2.8304, 0.0001 
) ); + + // With Bilinear, case 2 + input[0] = -0.3; + input[1] = 0.3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + TS_ASSERT( FloatUtils::areEqual( output[0], 0.0912, 0.0001 ) ); + } + + void test_evaluate_non_consecutive_layers() + { + NLR::NetworkLevelReasoner nlr; + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + nlr.addLayerDependency( 0, 1 ); + nlr.addLayerDependency( 1, 2 ); + nlr.addLayerDependency( 2, 3 ); + nlr.addLayerDependency( 0, 3 ); + nlr.addLayerDependency( 3, 4 ); + nlr.addLayerDependency( 0, 4 ); + nlr.addLayerDependency( 4, 5 ); + + // Set the weights and relus + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, 2 ); + nlr.setWeight( 2, 2, 3, 1, -2 ); + nlr.setWeight( 0, 1, 3, 1, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 0, 0, 4, 2 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 2, 5, 0, 1 ); + + // Evaluate + double input[2]; + double output; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); + TS_ASSERT( FloatUtils::areEqual( output, 2 ) ); + + input[0] = -1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); + TS_ASSERT( FloatUtils::areEqual( output, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.computeSuccessorLayers() ); + + TS_ASSERT_EQUALS( nlr.getLayer( 0 )->getSuccessorLayers(), 
Set( { 1, 3, 4 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 1 )->getSuccessorLayers(), Set( { 2 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 2 )->getSuccessorLayers(), Set( { 3 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 3 )->getSuccessorLayers(), Set( { 4 } ) ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getSuccessorLayers(), Set( { 5 } ) ); + } + + void test_evaluate_abs_and_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbsAndRelu( nlr ); + + double input[2]; + double output[2]; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); + + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + } + + void test_evaluate_round_and_sign() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithRoundAndSign( nlr ); + + double input[2]; + double output[2]; + + input[0] = 1.6; + input[1] = 1.4; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -2, 0.0001 ) ); + + input[0] = 1.6; + input[1] = 1.6; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -1, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); + } + + void test_evaluate_leaky_relu_and_sigmoid() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithLeakyReluAndSigmoid( nlr ); + + double input[2]; + double output[2]; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.7109, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 1.4602, 0.0001 ) ); + + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( 
nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0.4013, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 0.6508, 0.0001 ) ); + } + + void test_evaluate_softmax_and_max() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmaxAndMax( nlr ); + + double input[2]; + double output[1]; + + input[0] = 1; + input[1] = -3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -2.9998, 0.0001 ) ); + + input[0] = -3; + input[1] = 3; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], -1.0000, 0.0001 ) ); + } + + void test_evaluate_relu_and_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithReluAndBilinear( nlr ); + + double input[2]; + double output[1]; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); + + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); + } + + void test_store_into_other() + { + NLR::NetworkLevelReasoner nlr; + + populateNetwork( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + // With ReLUs, Inputs are zeros, only biases count + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, 
output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // With ReLUs, case 1 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_sigmoids() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSigmoids( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_round() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithRound( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + 
TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_sign() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSign( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_abs() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbs( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], 
output2[1] ) ); + } + + void test_store_into_other_with_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithLeakyRelu( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_max() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithMax( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_softmax() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmax( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( 
nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_store_into_other_with_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithBilinear( nlr ); + + NLR::NetworkLevelReasoner nlr2; + + TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); + + double input[2]; + double output1[2]; + double output2[2]; + + // case 1 + input[0] = 0; + input[1] = 0; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + + // case 2 + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); + TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); + + TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); + TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); + } + + void test_generate_input_query() + { + NLR::NetworkLevelReasoner nlr; + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( 
unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Variable indexing + + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + + // Set the weights and biases for the weighted sum layers + + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -5 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 1, 1, 0 ); + nlr.setBias( 1, 2, 0 ); + + nlr.setBias( 3, 0, 0 ); + nlr.setBias( 3, 1, 2 ); + + nlr.setBias( 5, 0, 0 ); + nlr.setBias( 5, 1, 0 ); + + // Mark the ReLU/Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Start the testing + Query ipq; + nlr.generateQuery( ipq ); + List unhandledEquations; + Set varsInUnhandledConstraints; + TS_ASSERT( + 
ipq.constructNetworkLevelReasoner( unhandledEquations, varsInUnhandledConstraints ) ); + NLR::NetworkLevelReasoner *reconstructedNlr = ipq.getNetworkLevelReasoner(); + + double input[2]; + double output[2]; + + input[0] = 1; + input[1] = 1; + + TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); + + input[0] = 1; + input[1] = 2; + + TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); + + TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); + TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); + } + + void test_simulate_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetwork( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With ReLUs, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With ReLUs, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 1 ) ); + } + + // 
With ReLUs, case 1 and 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 0 ) ); + } + } + + void test_simulate_sigmoids() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSigmoids( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // case 1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.6750, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 3.0167, + 0.0001 ) ); + } + + // case 2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.6032, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.5790, + 0.0001 ) ); + } + + // case 3 + Vector> simulations3; + simulations3.append( 
Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.5045, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.1957, + 0.0001 ) ); + } + } + + void test_simulate_round() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithRound( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Round, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Round, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 2.1 ) ); + simulations2.append( Vector( simulationSize, 1.4 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -4, + 0.0001 ) ); + } + + // With Round, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 
2.1 ) ); + simulations3.append( Vector( simulationSize, 1.6 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -12, + 0.0001 ) ); + } + } + + void test_simulate_sign() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSign( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Sign, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Sign, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, -2 ) ); + simulations2.append( Vector( simulationSize, -2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Sign, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, -1 ) ); + simulations3.append( Vector( simulationSize, 
1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -4 ) ); + } + } + + void test_simulate_abs() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbs( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Abs, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Abs, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, -2 ) ); + simulations2.append( Vector( simulationSize, -2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Abs, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( 
unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 4 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 10 ) ); + } + } + + void test_simulate_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithLeakyRelu( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Leaky ReLU, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + + // With Leaky ReLU, case 1 (alpha=0.1) + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.9, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 0.57, + 0.0001 ) ); + } + + // With Leaky ReLU, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, 1 ) ); + simulations3.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; 
i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -0.04, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -0.76, + 0.0001 ) ); + } + } + + void test_simulate_max() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithMax( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Max, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -3 ) ); + } + + // With Max, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, -3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -18 ) ); + } + + // With Max, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, -3 ) ); + simulations3.append( Vector( simulationSize, 3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -5 ) ); + } + } + + void test_simulate_softmax() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmax( nlr ); + + unsigned simulationSize = 
Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Softmax, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.2999, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.4001, + 0.0001 ) ); + } + + // With Softmax, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, -3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.1192, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.7615, + 0.0001 ) ); + } + + // With Softmax, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, -3 ) ); + simulations3.append( Vector( simulationSize, 3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.1206, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2.7588, + 0.0001 ) ); + } + } + + void test_simulate_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithBilinear( nlr ); + + 
unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Bilinear, Inputs are zeros, only biases count + Vector> simulations1; + simulations1.append( Vector( simulationSize, 0 ) ); + simulations1.append( Vector( simulationSize, 0 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0 ) ); + } + + // With Bilinear, case 1 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 0.1 ) ); + simulations2.append( Vector( simulationSize, -0.3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2.8304, + 0.0001 ) ); + } + + // With Bilinear, case 2 + Vector> simulations3; + simulations3.append( Vector( simulationSize, -0.3 ) ); + simulations3.append( Vector( simulationSize, 0.3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.0912, + 0.0001 ) ); + } + } + + void test_simulate_non_consecutive_layers() + { + NLR::NetworkLevelReasoner nlr; + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + nlr.addLayerDependency( 0, 1 ); + nlr.addLayerDependency( 1, 2 ); + nlr.addLayerDependency( 2, 3 ); + nlr.addLayerDependency( 0, 3 ); + 
nlr.addLayerDependency( 3, 4 ); + nlr.addLayerDependency( 0, 4 ); + nlr.addLayerDependency( 4, 5 ); + + // Set the weights and relus + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, 2 ); + nlr.setWeight( 2, 2, 3, 1, -2 ); + nlr.setWeight( 0, 1, 3, 1, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 0, 0, 4, 2 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 2, 5, 0, 1 ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // Simulate1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1 ) ); + simulations1.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2 ) ); + + // Simulate2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, -1 ) ); + simulations2.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0 ) ); + } + + void test_simulate_abs_and_relu() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithAbsAndRelu( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // Simulate1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1 ) ); + simulations1.append( Vector( 
simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 2 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 2 ) ); + } + + // Simulate2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 4 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 4 ) ); + } + } + + void test_simulate_round_and_sign() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithRoundAndSign( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With Round/Sign, case 1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1.6 ) ); + simulations1.append( Vector( simulationSize, 1.4 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -2 ) ); + } + + // With Round/Sign, case 2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1.6 ) ); + simulations2.append( Vector( simulationSize, 1.6 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 
) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -1 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + -4 ) ); + } + } + + void test_simulate_leaky_relu_and_sigmoid() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithLeakyReluAndSigmoid( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With LeakyReLU/Sigmoid, case 1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1 ) ); + simulations1.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.7109, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 1.4602, + 0.0001 ) ); + } + + // With LeakyReLU/Sigmoid, case 2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 0.4013, + 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 1 ) + .get( i ), + 0.6508, + 0.0001 ) ); + } + } + + void test_simulate_softmax_and_max() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithSoftmaxAndMax( nlr ); + + unsigned simulationSize = Options::get()->getInt( 
Options::NUMBER_OF_SIMULATIONS ); + + // With Softmax/Max, case 1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1 ) ); + simulations1.append( Vector( simulationSize, -3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -2.9998, + 0.0001 ) ); + } + + // With Softmax/Max, case 2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, -3 ) ); + simulations2.append( Vector( simulationSize, 3 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + -1, + 0.0001 ) ); + } + } + + void test_simulate_relu_and_bilinear() + { + NLR::NetworkLevelReasoner nlr; + + populateNetworkWithReluAndBilinear( nlr ); + + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); + + // With ReLU/Bilinear, case 1 + Vector> simulations1; + simulations1.append( Vector( simulationSize, 1 ) ); + simulations1.append( Vector( simulationSize, 1 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i ), + 1 ) ); + } + + // With ReLU/Bilinear, case 2 + Vector> simulations2; + simulations2.append( Vector( simulationSize, 1 ) ); + simulations2.append( Vector( simulationSize, 2 ) ); + + TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); + + for ( unsigned i = 0; i < simulationSize; ++i ) + { + TS_ASSERT( FloatUtils::areEqual( + ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) + .get( 0 ) + .get( i 
), + 0 ) ); + } + } + + void test_interval_arithmetic_bound_propagation_relu_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetwork( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, 
Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + 
Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_abs_constraints() + { + NLR::NetworkLevelReasoner nlr; + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, 1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + 
nlr.setWeight( 4, 1, 5, 1, 3 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the absolute-value activation sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Layer dependencies — NOTE(review): these were already registered by the loop above; confirm addLayerDependency tolerates duplicates + nlr.addLayerDependency( 0, 1 ); + nlr.addLayerDependency( 1, 2 ); + nlr.addLayerDependency( 2, 3 ); + nlr.addLayerDependency( 3, 4 ); + nlr.addLayerDependency( 4, 5 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 
7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 8, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 41, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + 
tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), 
Tightening( 13, 56, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_sign_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSign( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), 
Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, 3 ); + tableau.setUpperBound( 0, 4 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + 
TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, 4, Tightening::LB ), Tightening( 2, 5, Tightening::UB ), + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 11, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, 1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, 1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, 4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_leaky_relu_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithLeakyRelu( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, 
large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), + Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), + Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), + + Tightening( 9, -0.28, Tightening::LB ), Tightening( 9, 10.1, Tightening::UB ), + Tightening( 11, -0.38, Tightening::LB ), Tightening( 11, 9.1, Tightening::UB ), + + Tightening( 12, -0.28, Tightening::LB ), Tightening( 12, 10.1, Tightening::UB ), + Tightening( 13, -1.42, Tightening::LB ), Tightening( 13, 37.4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 
2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), + Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), + + Tightening( 9, -0.34, Tightening::LB ), Tightening( 9, 7.1, Tightening::UB ), + Tightening( 11, -0.32, Tightening::LB ), Tightening( 11, 7.3, Tightening::UB ), + + Tightening( 12, -0.34, Tightening::LB ), Tightening( 12, 7.1, Tightening::UB ), + Tightening( 13, -1.3, 
Tightening::LB ), Tightening( 13, 29, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_round_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithRound( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, 1.4 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, -1.4 ); + tableau.setUpperBound( 1, 2.1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), + Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + 
Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), + Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), + + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), + Tightening( 11, -7, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), + Tightening( 13, -25, Tightening::LB ), Tightening( 13, 35, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3.1 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, 1.4 ); + tableau.setUpperBound( 1, 2.1 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + 
+ // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), + Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), + Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + + Tightening( 9, -16, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 11, -15, Tightening::LB ), Tightening( 11, 2, Tightening::UB ), + + Tightening( 12, -16, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -61, Tightening::LB ), Tightening( 13, 7, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_sigmoid_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSigmoids( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + 
tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.5000, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), + Tightening( 5, 0.0067, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), + Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.7311, Tightening::UB ), + + Tightening( 8, -0.2244, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), + Tightening( 10, 0.3948, Tightening::LB ), Tightening( 10, 2.2244, Tightening::UB ), + + Tightening( 9, 0.4441, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), + Tightening( 11, 0.5974, Tightening::LB ), Tightening( 11, 0.9024, Tightening::UB ), + + Tightening( 12, 0.4441, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ), + Tightening( 13, 2.2364, Tightening::LB ), Tightening( 13, 3.5399, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + 
tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.1192, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), + Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), + + Tightening( 8, -0.7616, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), + Tightening( 10, 0.2384, Tightening::LB ), Tightening( 10, 2.6052, Tightening::UB ), + + Tightening( 9, 0.3183, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), + Tightening( 11, 0.5593, Tightening::LB ), 
Tightening( 11, 0.9312, Tightening::UB ), + + Tightening( 12, 0.3183, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ), + Tightening( 13, 1.9963, Tightening::LB ), Tightening( 13, 3.6263, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_max_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithMax( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, 
Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 10, Tightening::UB ), + Tightening( 9, -8, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), + + Tightening( 11, -10, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -8, Tightening::LB ), Tightening( 3, 9, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), 
Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 9, Tightening::UB ), + Tightening( 7, -5, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 16, Tightening::UB ), + Tightening( 9, -14, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -5, Tightening::LB ), Tightening( 10, 16, Tightening::UB ), + + Tightening( 11, -16, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_softmax_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSoftmax( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + 
tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), + Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), + + Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), + Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), + + Tightening( 9, 0.0240, Tightening::LB ), Tightening( 9, 0.8349, Tightening::UB ), + Tightening( 11, 0.1651, Tightening::LB ), Tightening( 11, 0.9759, Tightening::UB ), + + Tightening( 12, 0.0240, Tightening::LB ), Tightening( 12, 0.8349, Tightening::UB ), + Tightening( 13, 0.5192, Tightening::LB ), Tightening( 13, 3.7629, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, 
-large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ), + + Tightening( 9, 0.0184, Tightening::LB ), Tightening( 9, 0.8678, Tightening::UB ), + Tightening( 11, 0.1322, Tightening::LB ), Tightening( 11, 0.9816, Tightening::UB ), + + Tightening( 12, 0.0184, Tightening::LB ), Tightening( 12, 0.8678, Tightening::UB ), + Tightening( 13, 0.4151, Tightening::LB ), Tightening( 13, 3.8125, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_bilinear_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithBilinear( nlr ); + 
+ + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -0.1 ); + tableau.setUpperBound( 0, 0.1 ); + tableau.setLowerBound( 1, -0.1 ); + tableau.setUpperBound( 1, 0.1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0.9, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ), + Tightening( 3, -0.5, Tightening::LB ), Tightening( 3, 0.5, Tightening::UB ), + Tightening( 4, -0.3, Tightening::LB ), Tightening( 4, 0.3, Tightening::UB ), + Tightening( 5, -0.3, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), + + Tightening( 6, -0.55, Tightening::LB ), Tightening( 6, 0.55, Tightening::UB ), + Tightening( 7, -0.09, Tightening::LB ), Tightening( 7, 0.09, Tightening::UB ), + + Tightening( 8, 1.36, Tightening::LB ), Tightening( 8, 2.64, Tightening::UB ), + Tightening( 9, -0.64, Tightening::LB ), Tightening( 9, 0.64, Tightening::UB ), + + Tightening( 10, -1.6896, Tightening::LB ), Tightening( 10, 1.6896, Tightening::UB ), + + 
Tightening( 11, -1.6896, Tightening::LB ), Tightening( 11, 1.6896, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -0.3 ); + tableau.setUpperBound( 0, 0.1 ); + tableau.setLowerBound( 1, -0.1 ); + tableau.setUpperBound( 1, 0.2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, 0.7, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ), + Tightening( 3, -0.8, Tightening::LB ), Tightening( 3, 0.9, Tightening::UB ), + Tightening( 4, -0.5, Tightening::LB ), Tightening( 4, 0.5, Tightening::UB ), + Tightening( 5, -0.6, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), + + Tightening( 6, -0.88, Tightening::LB ), Tightening( 6, 0.99, Tightening::UB ), + Tightening( 7, -0.3, Tightening::LB ), Tightening( 7, 0.3, Tightening::UB ), + + Tightening( 8, 0.82, Tightening::LB ), Tightening( 8, 3.29, Tightening::UB ), + Tightening( 9, -1.29, Tightening::LB ), Tightening( 9, 1.18, 
Tightening::UB ), + + Tightening( 10, -4.2441, Tightening::LB ), Tightening( 10, 3.8822, Tightening::UB ), + + Tightening( 11, -3.8822, Tightening::LB ), Tightening( 11, 4.2441, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_abs_and_relu_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithAbsAndRelu( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, 
Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -5, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, 
-large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), + + Tightening( 10, -10, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_round_and_sign_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithRoundAndSign( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, 1.4 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, -1.4 ); + tableau.setUpperBound( 1, 2.1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, 
-large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), + Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), + Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), + + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current 
bounds + tableau.setLowerBound( 0, -3.1 ); + tableau.setUpperBound( 0, 1.6 ); + tableau.setLowerBound( 1, 1.4 ); + tableau.setUpperBound( 1, 2.1 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), + Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), + Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), + + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), + Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 11, -1, 
Tightening::LB ), Tightening( 11, 1, Tightening::UB ), + + Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_leaky_relu_and_sigmoid_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithLeakyReluAndSigmoid( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 14 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, 
Tightening::UB ), + Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), + Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), + + Tightening( 9, 0.0573, Tightening::LB ), Tightening( 9, 0.9999, Tightening::UB ), + Tightening( 11, 0.0219, Tightening::LB ), Tightening( 11, 0.9999, Tightening::UB ), + + Tightening( 12, 0.0573, Tightening::LB ), Tightening( 12, 0.9999, Tightening::UB ), + Tightening( 13, 0.1229, Tightening::LB ), Tightening( 13, 3.9996, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + 
tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), + Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), + + Tightening( 9, 0.0323, Tightening::LB ), Tightening( 9, 0.9992, Tightening::UB ), + Tightening( 11, 0.0392, Tightening::LB ), Tightening( 11, 0.9993, Tightening::UB ), + + Tightening( 12, 0.0323, Tightening::LB ), Tightening( 12, 0.9992, Tightening::UB ), + Tightening( 13, 0.1498, Tightening::LB ), Tightening( 13, 3.9972, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_softmax_and_max_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithSoftmaxAndMax( nlr ); + + + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large 
); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), + Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), + Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.0066, Tightening::LB ), + Tightening( 3, 0.9517, Tightening::UB ), + Tightening( 5, 0.0007, Tightening::LB ), + Tightening( 5, 0.9909, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), + Tightening( 7, 0.7297, Tightening::UB ), + + Tightening( 8, -0.7225, Tightening::LB ), + Tightening( 8, 1.9403, Tightening::UB ), + Tightening( 9, 0.3192, Tightening::LB ), + Tightening( 9, 2.9819, Tightening::UB ), + + Tightening( 10, 0.3192, Tightening::LB ), + Tightening( 10, 2.9819, Tightening::UB ), + + Tightening( 11, -2.9819, Tightening::LB ), + Tightening( 11, -0.3192, Tightening::UB ) } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + 
tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { Tightening( 2, -2, Tightening::LB ), + Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), + Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), + Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), + Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), + Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), + Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 9, 0.0654, Tightening::LB ), + Tightening( 9, 2.9933, Tightening::UB ), + + Tightening( 10, 0.0654, Tightening::LB ), + Tightening( 10, 2.9933, Tightening::UB ), + + Tightening( 11, -2.9933, Tightening::LB ), + Tightening( 11, -0.0654, Tightening::UB ) } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, 
expectedBounds2 ) ); + } + + void test_interval_arithmetic_bound_propagation_relu_and_bilinear_constraints() + { + NLR::NetworkLevelReasoner nlr; + populateNetworkWithReluAndBilinear( nlr ); + + MockTableau tableau; + tableau.getBoundManager().initialize( 12 ); + + // Initialize the bounds + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + double large = 1000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + nlr.setTableau( &tableau ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, -1, 
Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -7, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), + + Tightening( 11, -49, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 
8, 7, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -14, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), + + Tightening( 11, -49, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + } + + void test_sbt_relus_all_active() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Both ReLUs active, bound survive through activations: + + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ + + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_relus_active_and_inactive() + { + 
Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive, bounds get zeroed + Second ReLU is active, bounds surive the activation + + x4.lb = 0 + x4.ub = 0 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_relus_active_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // 
Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is undecided, bound is concretized. + Coefficient: 12/(12--4) = 12/16 = 0.75 + Second ReLU is active, bounds surive the activation + + x4 range: [0, 12] + x4.lb = 0.75( 2x0 + 3x1 ) - 0.75 * 15 = 1.5x0 + 2.25x1 - 11.25 + x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = 0.5x0 + 1.25x1 - 11.25 + x6.ub = 0.5x0 + 1.25x1 - 8.25 + + x6 range: [2 + 1.25 - 11.25 = -8, 3 + 6.25 - 8.25 = 1] = [-8, 1] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_relus_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTRelu( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
+ nlr.setBias( 1, 0, -15 ); + + // However, one of the ReLU's variables has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive (set externally), bounds get zeroed + Second ReLU is active, bounds surive the activation + + x4.lb = 0 + x4.ub = 0 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_relu_residual1() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluResidual1( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [-1, 1] + + Layer 1: + + x1.lb = x0 : [-1, 1] + x1.ub = x0 : [-1, 1] + + ReLU is undecided, bound is concretized. 
+ Coefficient: 1/( 1--1 ) = 1/2 = 0.5 + + x2.lb = 0.5x0 + x2.ub = 0.5x0 + 0.5 + x2 range: [0, 1] + + Layer 2 (with residual from x0): + + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1] + x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] + x3 range: [-1, 2.5] + + ReLU is undecided, bound is concretized. + Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. + + x4.lb = 0 + x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] + x4 range: [0, 2.5] + + Layer 3 (with residual from x1): + + x5.lb = 3 ( 0 ) + 3 ( x0 ) + 1 = 3x0 + 1 : [-2, 4] + x5.ub = 3 ( -15/14 x0 + 20/14 ) + 3 ( x0 ) + 1 = -3/14 x0 + 74/14 : [71/14, 77/14 = 5.5] + + x5 range: [-2, 4] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2.5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 2.5, Tightening::UB ), + Tightening( 5, 2, Tightening::LB ), + Tightening( 5, 5.5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_relu_residual2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluResidual2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [-1, 1] + + Layer 1: + + x1.lb = x0 : [-1, 1] + x1.ub = x0 : [-1, 1] + + ReLU is undecided, bound is concretized. 
+ Coefficient: 1/( 1--1 ) = 1/2 = 0.5 + + x2.lb = 0.5x0 + x2.ub = 0.5x0 + 0.5 + x2 range: [0, 1] + + Layer 2 (with residual from x0): + + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1] + x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] + x3 range: [-1, 2.5] + + ReLU is undecided, bound is concretized. + Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. + + x4.lb = 0 + x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] + x4 range: [0, 2.5] + + Layer 3 (with residual from x0): + + x5.lb = 3 ( 0 ) + 1 ( x0 ) + 1 = 1x0 + 1 : [0, 2] + x5.ub = 3 ( -15/14 x0 + 20/14 ) + 1 ( x0 ) + 1 = -31/14 x0 + 74/14 : [43/14, 105/14 + = 7.5] x5 range: [0, 7.5] + + Layer 4: + x6.lb = 1x0 + 1 : [0, 2] + x6.ub = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5] + x6 range: [0, 7.5] + */ + + List expectedBounds( { + Tightening( 1, -1, Tightening::LB ), + Tightening( 1, 1, Tightening::UB ), + Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 2.5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 2.5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 7.5, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), + Tightening( 6, 7.5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_relu_reindex() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTReluReindex( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + 
+ x0: [-1, 1] + x1: [-1, 1] + + Layer 1: + + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] + + Both ReLUs are undecided, bounds are concretized. + Coefficient: 2/( 2--2 ) = 2/4 = 0.5 + + x4.lb = 0.5 ( x0 + x1 ) = 0.5x0 + 0.5x1 + x4.ub = 0.5 ( x0 + x1 ) + 1 = 0.5x0 + 0.5x1 + 1 + x4 range: [0, 2] + + x5.lb = 0.5 ( x0 - x1 ) = 0.5x0 - 0.5x1 + x5.ub = 0.5 ( x0 - x1 ) + 1 = 0.5x0 - 0.5x1 + 1 + x5 range: [0, 2] + + Layer 2: + + x6.lb = 1 ( 0.5x0 + 0.5x1 ) + 1 ( 0.5x0 - 0.5x1 ) = x0 : [-1, 1] + x6.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) + 1 ( 0.5x0 - 0.5x1 + 1 ) = x0 + 2 : [1, 3] + x6 range: [-1, 3] + + x7.lb = 1 ( 0.5x0 + 0.5x1 ) - 1 ( 0.5x0 - 0.5x1 + 1 ) = x1 - 1 : [-2, 0] + x7.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) - 1 ( 0.5x0 - 0.5x1 ) = x1 + 1 : [0, 2] + x7 range: [-2, 2] + + Both ReLUs are undecided, bounds are concretized. + Coefficient (first ReLU, lower): 1/( 1--1 ) = 1/2 = 0.5 + Coefficient (first ReLU, upper): 1 (propagated as is) + Coefficient (second ReLU, lower): 0 (bound is zero) + Coefficient (second ReLU, upper): 2/( 2--2 ) = 2/4 = 0.5 + + x8.lb = 0.5 ( x0 ) = 0.5x0 + x8.ub = x0 + 2 + x8 range: [0, 3] + + x9.lb = 0 + x9.ub = 0.5 ( x1 + 1 ) + 1 = 0.5x1 + 1.5 + x9 range: [0, 2] + + Layer 3: + + x10.lb = 1 ( 0.5x0 ) + 1 ( 0 ) + 1 = 0.5x0 + 1 : [0.5, 1.5] + x10.ub = 1 ( x0 + 2 ) + 1 ( 0.5x1 + 1.5 ) + 1 = x0 + 0.5x1 + 4.5 : [3, 6] + x10 range: [0.5, 6] + + x11.lb = 0.5x1 - 0.5 + x11.ub = 0.5x1 + 1.5 + x11 range: [0, 2] + + */ + + List expectedBounds( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, 
0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, 0.5, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 2, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_abs_all_positive() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large 
); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Both absolute values positive, bounds survive through activations: + + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ + + List expectedBounds( { + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_abs_positive_and_negative() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, 
NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is negative, bounds get flipped + Second absolute value is positive, bounds 
survive the activation + + x4.lb = -2x0 -3x1 + 30 : [3, 19] + x4.ub = -2x0 -3x1 + 30 : [3, 19] + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - 3x0 - 4x1 + 30 : [-8, 14] + x6.ub = - 3x0 - 4x1 + 30 : [-8, 14] + */ + + List expectedBounds( { + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 19, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 14, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_absolute_values_positive_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 
); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First absolute value is undecided, bounds are concretized. 
+ Second absolute value is active, bounds survive the activation + + x4 range: [0, 12] + x4.lb = 0 + x4.ub = 12 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 + 12 : [ 1, 7] + + x6 range: [-11, 7] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, 7, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_absolute_values_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. + nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2 is eliminated, everything set to -3 + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Second absolute value is positive, bounds survive the activation + + x4: all set to 3 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 + 3 : [-8, -2] + x6.ub = - x0 - x1 + 3 : [-8, -2] + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), + } ); + + List bounds; + 
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_signs_positive_and_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + 
tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First sign is undecided, bounds are concretized. + Second sign is active, bounds become constant 1 + Coefficient (first Sign, lower): 2/12 = 1/6. + Coefficient (first Sign, upper): -2/-4 = 1/2. + + x4 range: [-1, 1] + x4.lb = 1/6 ( 2x0 + 3x1 - 15 ) - 1 = 2/6 x0 + 3/6 x1 - 21/6 + x4.ub = 1/2 ( 2x0 + 3x1 - 15 ) + 1 = x0 + 1.5x1 - 6.5 + + x5 range: [1, 1] + x5.lb = 1 + x5.ub = 1 + + Layer 2: + + x6.lb = 1 ( 2/6 x0 + 3/6 x1 - 21/6 ) - 1 ( 1 ) = 2/6 x0 + 3/6 x1 - 27/6 : [-16/6, 0] + x6.ub = 1 ( x0 + 1.5x1 - 6.5 ) - 1 ( 1 ) = x0 + 1.5x1 - 7.5 : [-2, 6] + + x6 range: [-8/3, 6] + */ + + List expectedBounds( { + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2.6667, Tightening::LB ), + Tightening( 6, 6, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_signs_active_and_externally_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + tableau.getBoundManager().initialize( 7 ); + nlr.setTableau( &tableau ); + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, 
NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 3; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Weights + nlr.setWeight( 0, 0, 1, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
+ nlr.setBias( 1, 0, -15 ); + + // However, the weighted sum variable has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2 is eliminated, everything set to -3 + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First sign is negative, bounds become constant -1 + Second sign is positive, bounds become constant 1 + + x4: all set to -1 + + x5: all set to 1 + + Layer 2: + + x6.lb = 1 ( -1 ) - 1 ( 1 ) = -2 + x6.ub = 1 ( -1 ) - 1 ( 1 ) = -2 + */ + + List expectedBounds( { + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), + Tightening( 4, -1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), + Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), + Tightening( 6, -2, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_leaky_relu() + { + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTLeakyReLU( nlr, tableau ); // alpha = 0.2 + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [-1, 1] + x1: [-1, 1] + + Layer 1: + + x2.lb = x0 + x1 : [-2, 2] + x2.ub = x0 + x1 : [-2, 2] + + x3.lb = x0 - x1 : [-2, 2] + x3.ub = x0 - x1 : [-2, 2] + + Both LeakyReLUs are undecided, bounds are concretized. 
+ Coefficient: ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6 + Bias: ( 0.2 - 1 ) * 2 * -2 / ( 2--2 ) = 0.8 + + x4.lb = x0 + x1 + x4.ub = 0.6 ( x0 + x1 ) + 0.8 = 0.6x0 + 0.6x1 + 0.8 + x4 range: [-0.4, 2] + + x5.lb = x0 - x1 + x5.ub = 0.6 ( x0 - x1 ) + 0.8 = 0.6x0 - 0.6x1 + 0.8 + x5 range: [-0.4, 2] + + Layer 2: + + x6.lb = 1 ( x0 + x1 ) + 1 ( x0 - x1 ) = 2x0 : [-2, 2] + x6.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) + 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2x0 + 1.6 : [0.4, 2.8] + x6 range: [-2, 2.8] + + x7.lb = 1 ( x0 + x1 ) - 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] + x7.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) - 1 ( x0 - x1 ) = -0.4x0 + 1.6x1 + 0.8 : [-1.2, 2.8] + x7 range: [-2.8, 2.8] + + Both LeakyReLUs are undecided, bounds are concretized. + Coefficient (first LeakyReLU): ( 2.8 - 0.2*-2 )/( 2.8--2 ) = 3.2/4.8 = 10/15 + Bias (first LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2 / ( 2.8--2 ) = 14/15 + + Coefficient (second LeakyReLU): ( 2.8 - 0.2*-2.8 )/( 2.8--2.8 ) = 3.36/5.6 = 0.6 + Bias (second LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2.8 / ( 2.8--2.8 ) = 1.12 + + x8.lb = 2x0 + x8.ub = 10/15 ( 1.2x0 + 1.6 ) + 14/15 = 0.8x0 + 2 + x8 range: [-0.4, 2.8] + + x9.lb = 0.4x0 + 1.6x1 - 0.8 + x9.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 + x9 range: [-0.56, 2.8] + + Layer 3: + + x10.lb = 1 ( 0.4x0 + 1.6x1 - 0.8 ) + 1 ( 2x0 ) + 1 = 2.4x0 + 1.6x1 + 0.2 : [-3.8, 5.2] + x10.ub = 1 ( -0.24 x0 + 0.96 x1 + 1.6 ) + 1 ( 0.8x0 + 2 ) + 1 = 0.56x0 + 0.96x1 + 4.6 : + [3.08, 6.12] x10 range: [-3.8, 6.12] + + x11.lb = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] + x11.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 : [0.4, 2.8] + x11 range: [-2.8, 2.8] + + */ + + List expectedBounds( + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.4, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -0.4, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), 
+ + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2.8, Tightening::UB ), + Tightening( 7, -2.8, Tightening::LB ), Tightening( 7, 2.8, Tightening::UB ), + + Tightening( 8, -0.4, Tightening::LB ), Tightening( 8, 2.8, Tightening::UB ), + Tightening( 9, -0.56, Tightening::LB ), Tightening( 9, 2.8, Tightening::UB ), + + Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 6.12, Tightening::UB ), + Tightening( 11, -2.8, Tightening::LB ), Tightening( 11, 2.8, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_sigmoids_and_round() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSigmoidsAndRound( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + // Layer 1 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 0 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 0 ), 2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 1 ), -2, 0.00001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 1 ), 2, 0.00001 ) ); + + // Layer 2 + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 0 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 0 ), 0.8807, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 1 ), 0.1192, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 1 ), 0.8807, 0.0001 ) ); + + // Layer 3 + /* + Double-check 
with Python + --- + from math import exp as e + def g(x): + return 1 / (1 + e(-x)) + + def g_prime(x): + return g(x) * (1 - g(x)) + + def lam(l, u): + return (g(u) - g(l)) / (u - l) + + def lam_prime(l, u): + return min(g_prime(l), g_prime(u)) + + l3 = l4 = -2 + u3 = u4 = 2 + l5 = l6 = g(-2) + u5 = u6 = g(2) + lambda7 = lam(l3, u3) + lambda7_prime = lam_prime(l3, u3) + lambda8 = lam(l4, u4) + lambda8_prime = lam_prime(l4, u4) + x7_l = lambda7_prime * (-2) + g(-2) + g(-2) - lambda7_prime * (-2 + -2) + x7_u = lambda7_prime * (2) + g(2) + g(2) -lambda7_prime * (2 + 2) + x8_l = lambda8_prime * (-2) + g(-2) - g(2) - lambda8_prime * (-2 - 2) + x8_u = lambda8_prime * (2) + g(2) - g(-2) -lambda8_prime * (2 - -2) + print(x7_l) + print(x7_u) + print(x8_l) + print(x8_u) + --- + [output]: + 0.4483930148512481 + 1.5516069851487517 + -0.5516069851487517 + 0.5516069851487517 + */ + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 0 ), 0.4483, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 0 ), 1.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 1 ), -0.5516, 0.0001 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 1 ), 0.5516, 0.0001 ) ); + + // Layer 4 + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 0 ), 0 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 0 ), 2 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 1 ), -1 ); + TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 1 ), 1 ); + } + + void test_sbt_max_not_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTMax( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + 
Input ranges: + + x0: [-1, 1] + x1: [-1, 2] + + Layer 1: + + x2.lb = x0 + x1 : [-2, 3] + x2.ub = x0 + x1 : [-2, 3] + + x3.lb = x0 - x1 : [-3, 2] + x3.ub = x0 - x1 : [-3, 2] + + Both ReLUs are undecided, bounds are concretized. + Coefficient (first ReLU): 3/( 3--2 ) = 3/5 = 0.6 + Coefficient (second ReLU): 2/( 2--3 ) = 2/5 = 0.4 + + x4.lb = 0.6 ( x0 + x1 ) = 0.6x0 + 0.6x1 + x4.ub = 0.6 ( x0 + x1 ) + 1.2 = 0.6x0 + 0.6x1 + 1.2 + x4 range: [0, 3] + + x5.lb = 0.4 ( x0 - x1 ) = 0.4x0 - 0.4x1 + x5.ub = 0.4 ( x0 - x1 ) + 1.2 = 0.4x0 - 0.4x1 + 1.2 + x5 range: [0, 2] + + Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub + Max inherits lower bound from x4, and its upper bound is constant 3. + + x6.lb = 0.6x0 + 0.6x1 : [-1.2, 1.8] + x6.ub = 3 : [3, 3] + x6 range: [-1.2, 3] + + Layer 3: + + x7.lb = 2 ( 0.6x0 + 0.6x1 ) = 1.2x0 + 1.2x1 : [-2.4, 3.6] + x7.ub = 2 ( 3 ) = 6 : [6, 6] + x7 range: [-2.4, 6] + */ + + List expectedBounds( { + Tightening( 2, -2, Tightening::LB ), + Tightening( 2, 3, Tightening::UB ), + Tightening( 3, -3, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), + Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -2.4, Tightening::LB ), + Tightening( 7, 6, Tightening::UB ), + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_max_fixed() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTMax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -3 ); + tableau.setUpperBound( 1, -2 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [1, 2] + x1: [-3, -2] + + Layer 1: + + x2.lb = x0 + x1 : [-2, 0] + x2.ub = x0 + x1 : [-2, 0] + + x3.lb = x0 - x1 : [3, 5] + x3.ub = x0 - x1 : [3, 5] + + First ReLU is negative, bounds become constant 0 + Second ReLU is positive, bounds survive the activation + + x4: all set to 0 + + x5.lb = x0 - x1 : [3, 5] + x5.ub = x0 - x1 : [3, 5] + + Max is fixed because x5.lb > x4.ub, it inherits x5's bounds + + x6.lb = x0 - x1 : [3, 5] + x6.ub = x0 - x1 : [3, 5] + + Layer 3: + + x7.lb = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] + x7.ub = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] + */ + + List expectedBounds( { + Tightening( 2, -2, Tightening::LB ), + Tightening( 2, 0, Tightening::UB ), + Tightening( 3, 3, Tightening::LB ), + Tightening( 3, 5, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 3, Tightening::LB ), + Tightening( 5, 5, Tightening::UB ), + Tightening( 6, 3, Tightening::LB ), + Tightening( 6, 5, Tightening::UB ), + Tightening( 7, 6, Tightening::LB ), + Tightening( 7, 10, Tightening::UB ), + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_sbt_softmax1() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + } + + void test_sbt_softmax2() + { + Options::get()->setString( 
Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + { + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + */ + List expectedBounds( { Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, 0.2595, Tightening::LB ), + Tightening( 6, 0.2595, Tightening::UB ), + Tightening( 7, 0.7054, Tightening::LB ), + Tightening( 7, 0.7054, Tightening::UB ), + Tightening( 8, 0.0351, Tightening::LB ), + Tightening( 8, 0.0351, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), + Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), + Tightening( 10, -1, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + { + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "er" ); + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.000001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.000001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.000001 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + */ + List expectedBounds( { Tightening( 3, 2, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), + Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0, Tightening::UB ), + Tightening( 6, 0.2595, Tightening::LB ), + Tightening( 6, 0.2595, Tightening::UB ), + Tightening( 7, 0.7054, Tightening::LB ), + Tightening( 7, 0.7054, Tightening::UB ), + Tightening( 8, 0.0351, Tightening::LB ), + Tightening( 8, 0.0351, Tightening::UB ), + Tightening( 9, 1, Tightening::LB ), + Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), + Tightening( 10, -1, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + } + + void test_sbt_softmax3() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTSoftmax2( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 1.00001 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 1.00001 ); + tableau.setLowerBound( 2, 1 ); + tableau.setUpperBound( 2, 1.00001 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [1, 1.0001] + x1: [1, 1.0001] + x2: [1, 1.0001] + */ + + List expectedBounds( + { Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, 
Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ), + Tightening( 8, 0.8668, Tightening::LB ), Tightening( 8, 0.8668, Tightening::UB ), + Tightening( 9, 0.9820, Tightening::LB ), Tightening( 9, 0.9820, Tightening::UB ), + Tightening( 10, 0.1173, Tightening::LB ), Tightening( 10, 0.1173, Tightening::UB ), + Tightening( 11, 0.0179, Tightening::LB ), Tightening( 11, 0.0179, Tightening::UB ), + Tightening( 12, 0.0159, Tightening::LB ), Tightening( 12, 0.0159, Tightening::UB ), + Tightening( 13, 0.9470, Tightening::LB ), Tightening( 13, 0.9470, Tightening::UB ), + Tightening( 14, -0.9470, Tightening::LB ), Tightening( 14, -0.9470, Tightening::UB ), + Tightening( 15, 1.0253, Tightening::LB ), Tightening( 15, 1.0253, Tightening::UB ), + Tightening( 16, -1.0253, Tightening::LB ), Tightening( 16, -1.0253, Tightening::UB ) + + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_softmax_bounds_er() + { + Vector inputLb = { -1, 0, 1 }; + Vector inputUb = { 0, 2, 4 }; + Vector input = { -0.5, 1, 2.5 }; + + double value = NLR::DeepPolySoftmaxElement::ERLowerBound( input, inputLb, inputUb, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.0114799, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.00563867, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 1 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.000838421, 0.00001 ) ); + + + Vector outputLb = { 0.2, 0, 0 }; + Vector outputUb = { 0.4, 0.1, 0.1 }; + + value = NLR::DeepPolySoftmaxElement::ERUpperBound( input, outputLb, outputUb, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, -1.44538, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 1.96538, 
0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 1 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.358535, 0.00001 ) ); + } + + void test_softmax_bounds_lse1() + { + Vector inputLb = { -1, 0, 1 }; + Vector inputUb = { 0, 2, 3 }; + Vector input = { -0.5, 1, 2 }; + double value = NLR::DeepPolySoftmaxElement::LSELowerBound( input, inputLb, inputUb, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); + value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); + value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 1 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.00703444, 0.001 ) ); + + Vector outputLb = { 0.2, 0, 0 }; + Vector outputUb = { 0.4, 0.1, 0.1 }; + value = NLR::DeepPolySoftmaxElement::LSEUpperBound( input, outputLb, outputUb, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.164165, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 0 ); + TS_ASSERT( FloatUtils::areEqual( value, 0.272204, 0.00001 ) ); + value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 1 ); + TS_ASSERT( FloatUtils::areEqual( value, -0.073207, 0.00001 ) ); + } + + void test_sbt_bilinear() + { + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBTBilinear( nlr, tableau ); + + tableau.setLowerBound( 0, 1 ); + tableau.setUpperBound( 0, 2 ); + tableau.setLowerBound( 1, -2 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [1, 2] + x1: [-2, 1] + + Layer 1: + + x2.lb = x0 - 2x1 : [-1, 6] + x2.ub = x0 - 2x1 : [-1, 6] + + x3.lb = x0 + x1 : [-1, 3] + x3.ub = x0 + x1 : [-1, 3] + + Coefficients for bilinear layer: + Lower bound: + alpha_l = x3.lb = 
-1 + beta = x2.lb = -1 + gamma_l = -x2.lb * x3.lb = --1 * -1 = -1 + + Upper bound: + alpha_u = x3.ub = 3 + beta = x2.lb = -1 + gamma_u = -x2.lb * x3.ub = --1 * 3 = 3 + + x4.lb = -1 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + -1 = -2x0 + x1 - 1 : [-7, -2] + x4.ub = 3 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + 3 = 2x0 - 7x1 + 3 : [0, 21] + x4 range: [-7, 21] + + Layer 3: + + x7.lb = -1 ( 2x0 - 5x1 + 3 ) = -2x0 + 7x1 - 3 : [-21, 0] + x7.ub = -1 ( -2x0 + 3x1 - 1 ) = 2x0 + x1 + 1 : [2, 7] + x4 range: [-21, 5] + */ + + List expectedBounds( { Tightening( 2, -1, Tightening::LB ), + Tightening( 2, 6, Tightening::UB ), + Tightening( 3, -1, Tightening::LB ), + Tightening( 3, 3, Tightening::UB ), + Tightening( 4, -7, Tightening::LB ), + Tightening( 4, 21, Tightening::UB ), + Tightening( 5, -21, Tightening::LB ), + Tightening( 5, 7, Tightening::UB ) } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + } + + void test_concretize_input_assignment() + { + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + + populateNetwork( nlr ); + + // With ReLUs, Inputs are zeros, only biases count + tableau.nextValues[0] = 0; + tableau.nextValues[1] = 0; + + Map assignment; + + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 4 ) ); + + TS_ASSERT( assignment.size() == 14 ); + TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( assignment[13], 4 ) ); + + // With ReLUs, case 1 + tableau.nextValues[0] = 1; + tableau.nextValues[1] = 1; + + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 1 ) ); + + 
TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) ); + TS_ASSERT( FloatUtils::areEqual( assignment[13], 1 ) ); + + // With ReLUs, case 2 + tableau.nextValues[0] = 1; + tableau.nextValues[1] = 2; + + TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); + + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 0 ) ); + TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 0 ) ); + + TS_ASSERT( FloatUtils::areEqual( assignment[12], 0 ) ); + TS_ASSERT( FloatUtils::areEqual( assignment[13], 0 ) ); + } + + + void test_obtain_bound_from_ipq() + { + NLR::NetworkLevelReasoner nlr; + populateNetwork( nlr ); + + Query query; + query.setNumberOfVariables( 14 ); + + + // Initialize the bounds + query.setLowerBound( 0, -1 ); + query.setUpperBound( 0, 1 ); + query.setLowerBound( 1, -1 ); + query.setUpperBound( 1, 1 ); + + double large = 1000; + query.setLowerBound( 2, -large ); + query.setUpperBound( 2, large ); + query.setLowerBound( 3, -large ); + query.setUpperBound( 3, large ); + query.setLowerBound( 4, -large ); + query.setUpperBound( 4, large ); + query.setLowerBound( 5, -large ); + query.setUpperBound( 5, large ); + query.setLowerBound( 6, -large ); + query.setUpperBound( 6, large ); + query.setLowerBound( 7, -large ); + query.setUpperBound( 7, large ); + query.setLowerBound( 8, -large ); + query.setUpperBound( 8, large ); + query.setLowerBound( 9, -large ); + query.setUpperBound( 9, large ); + query.setLowerBound( 10, -large ); + query.setUpperBound( 10, large ); + query.setLowerBound( 11, -large ); + query.setUpperBound( 11, large ); + query.setLowerBound( 12, -large ); + query.setUpperBound( 12, large ); + query.setLowerBound( 13, -large ); + query.setUpperBound( 13, large ); + + // Initialize + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds( query ) ); + + // Perform the tightening pass + TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); + + List expectedBounds( { + Tightening( 2, 0, 
Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), + + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), + + Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), + Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : expectedBounds ) + TS_ASSERT( bounds.exists( bound ) ); + } + + void test_backwards_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, 
Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), + Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 8, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, 
large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), + + Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 8, 0, Tightening::LB ), + Tightening( 9, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 
); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), + + Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), + Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), + + Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), + + Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), + + Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP 
propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 19, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + 
tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), + Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), + Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), + + Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), + Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), + Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), + + Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( 
bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 20, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_sigmoid() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSigmoid( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ), + Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ), + + Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ), + Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ), + + Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ), + Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ), + + Tightening( 10, -1.1819, 
Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ), + Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0.0066, Tightening::LB ), 
Tightening( 4, 0.8807, Tightening::UB ), + Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ), + + Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ), + Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ), + + Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ), + Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ), + + Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ), + Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_sigmoid2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSigmoid2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), 
Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), + Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ), + Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ), + Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ), + + Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ), + Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ), + Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ), + + Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ), + Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ), + Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ), + + Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ), + Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ), + + Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ), + Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ), + + Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, 
expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, 
Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ), + Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ), + Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ), + + Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ), + Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ), + Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ), + + Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ), + Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ), + Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ), + + Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ), + Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ), + + Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ), + Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ), + + Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, 
expectedBounds4 ) ); + } + + void test_backwards_abs() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardAbs( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + 
tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ), + + Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ), + + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke 
backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_abs2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardAbs2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ), + 
Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + 
tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ), + Tightening( 16, 0, 
Tightening::LB ), Tightening( 16, 15, Tightening::UB ), + + Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ), + + Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_round() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardRound( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + 
Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 11, -4, Tightening::LB ), + Tightening( 11, 6, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ), + + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, -10, Tightening::LB ), + Tightening( 7, 5, Tightening::UB ), + + Tightening( 11, -14, Tightening::LB ), + Tightening( 11, 11, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_round2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardRound2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + 
tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ), + Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ), + Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ), + + Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ), + Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ), + + Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ), + Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ), + + Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + 
TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 
21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ), + Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ), + Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ), + Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ), + + Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() 
); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_sign() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() 
); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -1, 
Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_sign2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, 
Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); 
+ tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, 
Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_leaky_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, 
Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 4, -0.1, Tightening::LB ), + + Tightening( 8, -0.045, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + 
tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 4, -0.5, Tightening::LB ), + Tightening( 5, -0.4, Tightening::LB ), + + Tightening( 8, -0.4571, Tightening::LB ), + Tightening( 9, -1.1057, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_leaky_relu2() + { + Options::get()->setString( 
Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyRelu2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), + Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), + Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), + Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), + Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), + + Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), + Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), + + Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, 
Tightening::UB ), + Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), + + Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 7, -0.2, Tightening::LB ), + Tightening( 8, -0.3, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + Tightening( 10, -0.6, Tightening::LB ), + + Tightening( 14, -0.9, Tightening::LB ), + Tightening( 15, -0.89, Tightening::LB ), + Tightening( 16, -0.77, Tightening::LB ), + + Tightening( 19, -2.3133, Tightening::LB ), + Tightening( 20, -1.2, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 
11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), + Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), + Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), + Tightening( 15, -21.7714, Tightening::LB ), Tightening( 
15, 13.6818, Tightening::UB ), + Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), + + Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), + Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), + + Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), + Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), + + Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, -0.2, Tightening::LB ), + Tightening( 8, -0.5, Tightening::LB ), + Tightening( 9, -0.6, Tightening::LB ), + Tightening( 10, -1.5, Tightening::LB ), + + Tightening( 14, -1.1, Tightening::LB ), + Tightening( 15, -2.1771, Tightening::LB ), + Tightening( 16, -1.15, Tightening::LB ), + + Tightening( 19, -5.6259, Tightening::LB ), + Tightening( 20, -1.9, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_softmax_and_max() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSoftmaxAndMax( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke SBT + 
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ), + Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ), + Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ), + + Tightening( 8, -0.1519, Tightening::LB ), Tightening( 8, 1.4775, Tightening::UB ), + Tightening( 9, 0.6614, Tightening::LB ), Tightening( 9, 2.3899, Tightening::UB ), + + Tightening( 10, 0.6614, Tightening::LB ), Tightening( 10, 2.3899, Tightening::UB ), + + Tightening( 11, -2.3899, Tightening::LB ), Tightening( 11, -0.6614, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + 
tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 9, 0.0674, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ), + + Tightening( 10, 0.0674, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ), + + Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.0674, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void 
test_backwards_softmax_and_max2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSoftmaxAndMax2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ), + Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ), + Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ), + Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ), + + Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9635, Tightening::UB ), + Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9806, Tightening::UB ), + Tightening( 13, -2.9902, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ), + + Tightening( 14, 0.1086, Tightening::LB ), Tightening( 14, 0.9971, Tightening::UB ), + Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8766, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4595, Tightening::UB ), + + Tightening( 17, 0.1086, Tightening::LB ), Tightening( 17, 0.9971, Tightening::UB ), + } ); + + List 
bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); 
+ TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ), + + Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9989, Tightening::UB ), + Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ), + Tightening( 13, -2.9989, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ), + + Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9972, Tightening::UB ), + Tightening( 15, 0.0024, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ), + + Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9972, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_relu_and_bilinear() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( 
Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ), + + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ), + + Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + 
tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), + + Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), + + Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() 
); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_relu_and_bilinear2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ), + Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), + + Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( 
nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, 
Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), + Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), + + Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_leaky_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + 
populateNetworkBackwardLeakyReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + /*List expectedBounds2( + { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2.1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 
0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.35, Tightening::UB ), + Tightening( 11, 1.35, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );*/ + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), 
+ Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + /*List expectedBounds4( + { Tightening( 4, -0.5, Tightening::LB ), + Tightening( 5, -0.4, Tightening::LB ), + + Tightening( 8, -0.4571, Tightening::LB ), + Tightening( 9, -1.1057, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );*/ + } + + bool boundsEqual( const List &bounds, const List &expectedBounds ) + { + if ( bounds.size() != expectedBounds.size() ) + return false; + + bool allFound = true; + for ( const auto &bound : bounds ) + { + bool currentFound = false; + for ( const auto &expectedBound : expectedBounds ) + { + currentFound |= + ( bound._type == expectedBound._type && + bound._variable == expectedBound._variable && + FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) ); + } + allFound &= currentFound; + } + return allFound; + } + + void updateTableau( MockTableau &tableau, List &tightenings ) + { + ASSERT( tableau ); + for ( const auto &tightening : tightenings ) + { + if ( tightening._type == Tightening::LB ) + { + tableau.setLowerBound( tightening._variable, tightening._value ); + } + + if ( tightening._type == Tightening::UB ) + { + tableau.setUpperBound( 
tightening._variable, tightening._value ); + } + } + } +}; From 22f0eaa7cb580a63b84bbe8b36a490d07a22d8f4 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 12 Dec 2024 20:55:32 +0200 Subject: [PATCH 29/72] Delete src/nlr/PolygonalTightening.h --- src/nlr/PolygonalTightening.h | 100 ---------------------------------- 1 file changed, 100 deletions(-) delete mode 100644 src/nlr/PolygonalTightening.h diff --git a/src/nlr/PolygonalTightening.h b/src/nlr/PolygonalTightening.h deleted file mode 100644 index 0f811f6fe6..0000000000 --- a/src/nlr/PolygonalTightening.h +++ /dev/null @@ -1,100 +0,0 @@ -/********************* */ -/*! \file PolygonalTightening.h - ** \verbatim - ** Top contributors (to current version): - ** Duligur Ibeling, Guy Katz, Ido Shmuel - ** This file is part of the Marabou project. - ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS - ** in the top-level source directory) and their institutional affiliations. - ** All rights reserved. See the file COPYING in the top-level source - ** directory for licensing information.\endverbatim - ** - ** [[ Add lengthier description here ]] - -**/ - -#ifndef __PolygonalTightening_h__ -#define __PolygonalTightening_h__ - -#include "Map.h" -#include "NeuronIndex.h" - -#include - -class PolygonalTightening -{ -public: - enum PolygonalBoundType { - LB = 0, - UB = 1, - }; - - PolygonalTightening( Map neuronToCoefficient, - double value, - PolygonalBoundType type ) - : _neuronToCoefficient( neuronToCoefficient ) - , _value( value ) - , _type( type ) - { - } - - /* - The coefficient of each neuron. - */ - Map _neuronToCoefficient; - - /* - Its new value. - */ - double _value; - - /* - Whether the tightening tightens the - lower bound or the upper bound. - */ - PolygonalBoundType _type; - - /* - Equality operator. 
- */ - bool operator==( const PolygonalTightening &other ) const - { - bool allFound = true; - for ( const auto &pair : _neuronToCoefficient ) - { - bool currentFound = false; - for ( const auto &otherPair : other._neuronToCoefficient ) - { - currentFound |= ( pair.first._layer == otherPair.first._layer && - pair.first._neuron == otherPair.first._neuron && - pair.second == otherPair.second ); - } - allFound &= currentFound; - } - bool result = allFound && _value == other._value && _type == other._type; - return result; - } - - void dump() const - { - printf( "PolygonalTightening: x %s %.2lf\n", _type == LB ? ">=" : "<=", _value ); - - for ( const auto &pair : _neuronToCoefficient ) - { - printf( "NeuronIndex: (layer %u, neuron %u), Coefficient: %.2lf )\n", - pair.first._layer, - pair.first._neuron, - pair.second ); - } - } -}; - -#endif // __PolygonalTightening_h__ - -// -// Local Variables: -// compile-command: "make -C ../.. " -// tags-file-name: "../../TAGS" -// c-basic-offset: 4 -// End: -// s From 24f2be1278c0a2ee7afeac8830e8bee33e33dc17 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 12 Dec 2024 20:56:07 +0200 Subject: [PATCH 30/72] Delete src/nlr/Test_NetworkLevelReasoner.h --- src/nlr/Test_NetworkLevelReasoner.h | 12348 -------------------------- 1 file changed, 12348 deletions(-) delete mode 100644 src/nlr/Test_NetworkLevelReasoner.h diff --git a/src/nlr/Test_NetworkLevelReasoner.h b/src/nlr/Test_NetworkLevelReasoner.h deleted file mode 100644 index 977faa4ed6..0000000000 --- a/src/nlr/Test_NetworkLevelReasoner.h +++ /dev/null @@ -1,12348 +0,0 @@ -/********************* */ -/*! \file Test_NetworkLevelReasoner.h - ** \verbatim - ** Top contributors (to current version): - ** Guy Katz, Andrew Wu - ** This file is part of the Marabou project. - ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS - ** in the top-level source directory) and their institutional affiliations. - ** All rights reserved. 
See the file COPYING in the top-level source - ** directory for licensing information.\endverbatim - ** - ** [[ Add lengthier description here ]] - -**/ - -#include "../../engine/tests/MockTableau.h" // TODO: fix this -#include "DeepPolySoftmaxElement.h" -#include "FloatUtils.h" -#include "Layer.h" -#include "NetworkLevelReasoner.h" -#include "Options.h" -#include "Query.h" -#include "Tightening.h" -#include "Vector.h" - -#include - -class MockForNetworkLevelReasoner -{ -public: -}; - -class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -{ -public: - MockForNetworkLevelReasoner *mock; - - void setUp() - { - TS_ASSERT( mock = new MockForNetworkLevelReasoner ); - } - - void tearDown() - { - TS_ASSERT_THROWS_NOTHING( delete mock ); - } - - void populateNetwork( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - 
nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithSigmoids( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::SIGMOID, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 
); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithAbs( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 
3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithSign( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - 
nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithRound( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ROUND, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 
1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Round sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithLeakyRelu( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - nlr.getLayer( 2 
)->setAlpha( 0.1 ); - nlr.getLayer( 4 )->setAlpha( 0.1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the LeakyReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - - void populateNetworkWithMax( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x b e - g - y c f - d - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, 
NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::MAX, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::MAX, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, -2 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 2 ); - nlr.setWeight( 0, 1, 1, 3, -3 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 0, 2 ); - - // Mark the Max sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 3, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - } - - void populateNetworkWithSoftmax( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - 
nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SOFTMAX, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Softmax sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 0, 4, 1 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 
3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithBilinear( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x b e - g - y c f - d - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::BILINEAR, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, -2 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 2 ); - nlr.setWeight( 0, 1, 1, 3, -3 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 0, 2 ); - - // Mark the Bilinear sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 3, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 5 ); - - 
nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - } - - void populateNetworkWithAbsAndRelu( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -5 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Round/Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - 
nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithRoundAndSign( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ROUND, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Round/Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - 
nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithLeakyReluAndSigmoid( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d f - b - y e g - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - nlr.getLayer( 2 )->setAlpha( 0.1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the LeakyReLU/Sigmoid sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); 
- - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - } - - void populateNetworkWithSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d - b f - y e - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::MAX, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Softmax/Max sources - nlr.addActivationSource( 
1, 0, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - } - - void populateNetworkWithReluAndBilinear( NLR::NetworkLevelReasoner &nlr ) - { - /* - a - x d - b f - y e - c - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 
1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the ReLU/Bilinear sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - } - - void populateNetworkSBTRelu( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - 2 R 1 - x0 --- x2 ---> x4 --- x6 - \ / / - 1 \ / / - \/ -1 / - /\ / - 3 / \ / - / \ R / - x1 --- x3 ---> x5 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // 
Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 7 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - } - - void populateNetworkSBTReluResidual1( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - -1 - __________________ - / \ - / 1 R -1 1 R 3 1 - x0 --- x1 ---> x2 --- x3 ---> x4 --- x5 - \ / - \ 3 / - \________________________/ - - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 1 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 1 ); - nlr.addLayer( 2, NLR::Layer::RELU, 1 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - nlr.addLayer( 4, NLR::Layer::RELU, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - nlr.addLayerDependency( 0, 3 ); - nlr.addLayerDependency( 1, 5 ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 4, 0, 5, 0, 3 ); - nlr.setWeight( 0, 0, 3, 0, -1 ); - nlr.setWeight( 1, 0, 5, 0, 3 ); - - - nlr.setBias( 3, 0, 1 ); - nlr.setBias( 5, 0, 1 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 
0 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 5 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 6 ); - tableau.setLowerBound( 1, -large ); - tableau.setUpperBound( 1, large ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - } - - void populateNetworkSBTReluResidual2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - -1 - __________________ - / \ - / 1 R -1 1 R 3 1 1 - x0 --- x1 ---> x2 --- x3 ---> x4 --- x5 --- x6 - \ / - \ 1 / - \_______________________________/ - - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 1 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 1 ); - nlr.addLayer( 2, NLR::Layer::RELU, 1 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - nlr.addLayer( 4, NLR::Layer::RELU, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - nlr.addLayer( 6, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 6; ++i ) - nlr.addLayerDependency( i - 1, i ); - nlr.addLayerDependency( 0, 3 ); - nlr.addLayerDependency( 0, 5 ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 4, 0, 5, 0, 3 ); - nlr.setWeight( 0, 0, 3, 0, -1 ); - nlr.setWeight( 0, 0, 5, 0, 1 ); - nlr.setWeight( 5, 0, 6, 0, 1 ); - - nlr.setBias( 3, 0, 1 ); 
- nlr.setBias( 5, 0, 1 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 7 ); - tableau.setLowerBound( 1, -large ); - tableau.setUpperBound( 1, large ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - } - - void populateNetworkSBTReluReindex( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 1 1 1 - x0 --- x2 x5 --- x6 x9 --- x10 - \ /\ /\ / \ / \ / - 1 \ / R\ /-1\ / R \ / 1 \ / - \/ \/ \/ \/ \/ - /\ /\ /\ /\ /\ - 1 / \ R/ \ 1/ \ R / \ 1 / \ - / \/ \/ \ / \ / 0 \ - x1 --- x3 x4 --- x7 x8 --- x11 - -1 1 - - The example described in Fig. 
3 of - https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 0 ); - - nlr.setBias( 5, 0, 1 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - - nlr.addActivationSource( 3, 0, 4, 1 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 8 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - 
tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkSBTLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 LR 1 LR 1 1 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 1 \ / 0 \ / - \/ \/ \/ - /\ /\ /\ - 1 / \ 1 / \ 1 / \ - / \ LR / \ LR / 1 \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - -1 -1 - - The example described in Fig. 
3 of - https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf - using LeakyReLU activation instead of ReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - nlr.getLayer( 2 )->setAlpha( 0.2 ); - nlr.getLayer( 4 )->setAlpha( 0.2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, 1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 0 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - - nlr.setBias( 5, 0, 1 ); - - // Mark the LeakyReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( 
NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkSBTSigmoidsAndRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 S 1 Rd - x0 --- x2 ---> x4 --- x6 --- x8 - \ / \ / - 1 \ / 1 \ / - \/ \/ - /\ /\ - 1 / \ 1 / \ - / \ S / \ Rd - x1 --- x3 ---> x5 --- x7 --- x9 - -1 -1 - - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 4; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, 1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - // Mark the Sigmoid sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 
1 ); - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 10 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - } - - void populateNetworkSBTMax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 R Max 2 - x0 --- x2 ---> x4 --- x6 ---> x7 - \ / / - 1 \ / / - \/ / - /\ / - 1 / \ / - / \ R / - x1 --- x3 ---> x5 - -1 - - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::MAX, 1 ); - nlr.addLayer( 4, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 4; ++i ) - nlr.addLayerDependency( i - 1, i ); - - 
// Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, -1 ); - nlr.setWeight( 3, 0, 4, 0, 2 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Mark the Max sources - nlr.addActivationSource( 2, 0, 3, 0 ); - nlr.addActivationSource( 2, 1, 3, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 7 ); - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 8 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - } - - void populateNetworkSBTSoftmax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - - x0 x3 S x6 - - x1 x4 S x7 - - x2 x5 S x8 - - x3 = x0 - x1 + x2 + 1 - x4 = -x0 + x1 + x2 + 2 - x5 = -x0 - x1 - x2 + 3 - - x6 x7 x8 = softmax(x3, x4, x5) - - x9 = x6 + x7 + x8 - x10 = x6 + x7 + x8 - - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); - nlr.addLayer( 3, 
NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, -1 ); - nlr.setWeight( 0, 0, 1, 2, -1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 2, -1 ); - nlr.setWeight( 0, 2, 1, 0, 1 ); - nlr.setWeight( 0, 2, 1, 1, 1 ); - nlr.setWeight( 0, 2, 1, 2, -1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 1, 1, 2 ); - nlr.setBias( 1, 2, 3 ); - - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 1, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 8 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 11 ); - tableau.setLowerBound( 3, -large ); - 
tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - } - - void populateNetworkSBTSoftmax2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - - x0 x3 S x8 - - x1 x4 S x9 - - x2 x5 S x10 - - x6 S x11 - - x7 S x12 - - x3 = x0 - x1 + x2 + 1 - x4 = -x0 + x1 + x2 + 2 - x5 = -x0 - x1 - x2 + 3 - x6 = -x0 - x1 - x2 + 2 - x7 = -x0 - x1 - x2 + 1 - - x8 x10 x12 = softmax(x3, x5, x7) - - x9 x11 = softmax(x4, x6) - - x13 = x8 + x10 + x12 - x14 = -x8 - x10 - x12 - x15 = x9 + x11 - x16 = -x9 - x11 - - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 5 ); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 5 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 4 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, -1 ); - nlr.setWeight( 0, 0, 1, 2, -1 ); - nlr.setWeight( 0, 0, 1, 3, -1 ); - nlr.setWeight( 0, 0, 1, 4, -1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 2, -1 ); - nlr.setWeight( 0, 1, 1, 3, -1 ); - nlr.setWeight( 0, 1, 1, 4, -1 ); - nlr.setWeight( 0, 2, 1, 0, 1 ); - nlr.setWeight( 0, 2, 1, 1, 1 ); - nlr.setWeight( 0, 2, 1, 2, -1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - nlr.setWeight( 0, 2, 1, 4, -1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - 
nlr.setWeight( 2, 4, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - nlr.setWeight( 2, 4, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 2, 1 ); - nlr.setWeight( 2, 3, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 3, -1 ); - nlr.setWeight( 2, 3, 3, 3, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 1, 1, 2 ); - nlr.setBias( 1, 2, 3 ); - nlr.setBias( 1, 3, 2 ); - nlr.setBias( 1, 4, 1 ); - - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 4, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 4, 2, 2 ); - nlr.addActivationSource( 1, 0, 2, 4 ); - nlr.addActivationSource( 1, 2, 2, 4 ); - nlr.addActivationSource( 1, 4, 2, 4 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 3, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 3 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 4 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 4 ), 12 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 13 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 3 ), 16 ); - - // Very loose bounds for neurons except inputs - double large = 
1000000; - - tableau.getBoundManager().initialize( 17 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - } - - void populateNetworkSBTBilinear( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - - x0 x2 - x x4 -- x5 - x1 x3 - - x2 = x0 - 2 * x1 - x3 = x0 + x1 - x4 = -x5 - - x4 = x2 * x3 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::BILINEAR, 1 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -2 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, -1 ); - - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - - - // Variable indexing - 
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 5 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 6 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - } - - void populateNetworkBackwardReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 R -1 R -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ R / \ R / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 
2 of - https://dl.acm.org/doi/10.1145/3563325 - using ReLU activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - nlr.getLayer( 2 )->setAlpha( 0.1 ); - nlr.getLayer( 4 )->setAlpha( 0.1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - 
// Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardReLU2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = ReLU( x3 ) - x8 = ReLU( x4 ) - x9 = ReLU( x5 ) - x10 = ReLU( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = ReLU( x11 ) - x15 = ReLU( x12 ) - x16 = ReLU( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = ReLU( x17 ) - x20 = ReLU( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::RELU, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::RELU, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // 
Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 
8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - 
tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardSigmoid( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 S -1 S -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ S / \ S / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 2 of - https://dl.acm.org/doi/10.1145/3563325 - using Sigmoid activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the Sigmoid sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - 
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardSigmoid2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = Sigmoid( x3 ) - x8 = Sigmoid( x4 ) - x9 = Sigmoid( x5 ) - x10 = Sigmoid( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = Sigmoid( x11 ) - x15 = Sigmoid( x12 ) 
- x16 = Sigmoid( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = Sigmoid( x17 ) - x20 = Sigmoid( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::SIGMOID, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::SIGMOID, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the Sigmoid sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - 
nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - 
tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardSign( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 Sign -1 Sign -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ Sign / \ Sign / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 
2 of - https://dl.acm.org/doi/10.1145/3563325 - using Sign activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - 
tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardSign2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = Sign( x3 ) - x8 = Sign( x4 ) - x9 = SIgn( x5 ) - x10 = Sign( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = Sign( x11 ) - x15 = Sign( x12 ) - x16 = Sign( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = Sign( x17 ) - x20 = Sign( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::SIGN, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 
1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - 
nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - 
tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 Rnd -1 Rnd -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ Rnd / \ Rnd / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 2 of - https://dl.acm.org/doi/10.1145/3563325 - using Round activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ROUND, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the Round sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( 
NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardRound2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = Round( x3 ) - x8 = Round( x4 ) - x9 = Round( x5 ) - x10 = Round( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = Round( x11 ) - x15 = Round( x12 ) - x16 = Round( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = Round( x17 ) 
- x20 = Round( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::ROUND, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::ROUND, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::ROUND, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the Round sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - 
nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 
10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardAbs( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 A -1 A -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ A / \ A / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 
2 of - https://dl.acm.org/doi/10.1145/3563325 - using Absolute value activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - 
double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardAbs2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = Abs( x3 ) - x8 = Abs( x4 ) - x9 = Abs( x5 ) - x10 = Abs( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = Abs( x11 ) - x15 = Abs( x12 ) - x16 = Abs( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = Abs( x17 ) - x20 = Abs( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases 
for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - 
nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - 
tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 LR -1 LR -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ LR / \ LR / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 2 of - https://dl.acm.org/doi/10.1145/3563325 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - nlr.getLayer( 2 )->setAlpha( 0.1 ); - nlr.getLayer( 4 )->setAlpha( 0.1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the LeakyReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - 
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardLeakyRelu2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = LeakyReLU( x3 ) - x8 = LeakyReLU( x4 ) - x9 = LeakyReLU( x5 ) - x10 = LeakyReLU( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = 
x7 - x8 + 2x9 - 2 - - x14 = LeakyReLU( x11 ) - x15 = LeakyReLU( x12 ) - x16 = LeakyReLU( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = LeakyReLU( x17 ) - x20 = LeakyReLU( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - nlr.getLayer( 2 )->setAlpha( 0.1 ); - nlr.getLayer( 4 )->setAlpha( 0.1 ); - nlr.getLayer( 6 )->setAlpha( 0.1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the LeakyReLU sources - nlr.addActivationSource( 
1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - 
tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr, - MockTableau &tableau ) - { - /* - a a' - x e - b b' f h - y d - c c' - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::MAX, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - 
nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Softmax/Max sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - 
tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - - void populateNetworkBackwardSoftmaxAndMax2( NLR::NetworkLevelReasoner &nlr, - MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 - x1 x12 x15 x17 - x5 x9 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7, x8, x9, x10 = Softmax( x2, x3, x4, x5 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14, x15, x16 = Softmax( x11, x12, x13 ) - - x17 = Max( x14, x15, x16 ) - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::SOFTMAX, 3 ); - nlr.addLayer( 5, NLR::Layer::MAX, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setBias( 
3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - - // Mark the Softmax/Max sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 3, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 3, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 1, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 2 ); - nlr.addActivationSource( 1, 0, 2, 3 ); - nlr.addActivationSource( 1, 1, 2, 3 ); - nlr.addActivationSource( 1, 2, 2, 3 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - nlr.addActivationSource( 3, 2, 4, 0 ); - nlr.addActivationSource( 3, 0, 4, 1 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 1 ); - nlr.addActivationSource( 3, 0, 4, 2 ); - nlr.addActivationSource( 3, 1, 4, 2 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 4, 0, 5, 0 ); - nlr.addActivationSource( 4, 1, 5, 0 ); - nlr.addActivationSource( 4, 2, 5, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - 
nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 18 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - } - - void populateNetworkBackwardReluAndBilinear( NLR::NetworkLevelReasoner &nlr, - MockTableau &tableau ) - { - /* - a a' - x e - b b' f h - y d - c c' - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); - nlr.addLayer( 5, 
NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the ReLU/Bilinear sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - 
tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardReluAndBilinear2( NLR::NetworkLevelReasoner &nlr, - MockTableau &tableau ) - { - /* - x3 x7 - x0 - x4 x8 x11 x13 - x1 x15 - x5 x9 x12 x14 - x2 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = ReLU( x3 ) - x8 = ReLU( x4 ) - x9 = ReLU( x5 ) - x10 = ReLU( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - - x13 = ReLU( x11 ) - x14 = ReLU( x12 ) - - x15 = x13 * x14 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::RELU, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::BILINEAR, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setBias( 3, 0, 2 ); - 
- // Mark the ReLU/Bilinear sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - nlr.addActivationSource( 4, 0, 5, 0 ); - nlr.addActivationSource( 4, 1, 5, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 13 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 14 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 15 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 16 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - 
tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - } - - void test_evaluate_relu() - { - NLR::NetworkLevelReasoner nlr; - - populateNetwork( nlr ); - - double input[2]; - double output[2]; - - // With ReLUs, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With ReLUs, case 1 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 1 ) ); - - // With ReLUs, case 2 - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 0 ) ); - } - - void test_evaluate_sigmoids() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSigmoids( nlr ); - - double input[2]; - double output[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0.6750, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 3.0167, 0.0001 ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0.6032, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.5790, 0.0001 ) ); - - // case 3 - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( 
nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0.5045, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.1957, 0.0001 ) ); - } - - void test_evaluate_abs() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithAbs( nlr ); - - double input[2]; - double output[2]; - - // With Abs, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With Abs, case 1 - input[0] = -2; - input[1] = -2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With Abs, case 2 - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 10 ) ); - } - - void test_evaluate_sign() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSign( nlr ); - - double input[2]; - double output[2]; - - // With Sign, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With Sign, case 1 - input[0] = -2; - input[1] = -2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With Sign, case 2 (0 considered "non-negative", sign(0)=1) - input[0] = -1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -4 ) ); - } - - void test_evaluate_round() - { - NLR::NetworkLevelReasoner 
nlr; - - populateNetworkWithRound( nlr ); - - double input[2]; - double output[2]; - - // With Round, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With Round, case 1 - input[0] = 2.1; - input[1] = 1.4; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 2, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); - - // With Round, case 2 - input[0] = 2.1; - input[1] = 1.6; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -12, 0.0001 ) ); - } - - void test_evaluate_leaky_relu() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithLeakyRelu( nlr ); - - double input[2]; - double output[2]; - - // With Leaky ReLU, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - - // With Leaky ReLU, case 1 (alpha=0.1) - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0.9, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 0.57, 0.0001 ) ); - - // With Leaky ReLU, case 2 - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -0.04, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -0.76, 0.0001 ) ); - } - - void test_evaluate_max() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithMax( nlr ); - - double input[2]; - double output[1]; - - // With Max, Inputs are zeros, only biases count - input[0] 
= 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -3 ) ); - - // With Max, case 1 - input[0] = 1; - input[1] = -3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -18 ) ); - - // With Max, case 2 - input[0] = -3; - input[1] = 3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -5 ) ); - } - - - void test_evaluate_softmax() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSoftmax( nlr ); - - double input[2]; - double output[2]; - - // With Softmax, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.2999, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.4001, 0.0001 ) ); - - // With Softmax, case 1 - input[0] = 1; - input[1] = -3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.1192, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.7615, 0.0001 ) ); - - // With Softmax, case 2 - input[0] = -3; - input[1] = 3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.1206, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2.7588, 0.0001 ) ); - } - - void test_evaluate_bilinear() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithBilinear( nlr ); - - double input[2]; - double output[1]; - - // With Bilinear, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); - - // With Bilinear, case 1 - input[0] = 0.1; - input[1] = -0.3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 2.8304, 0.0001 
) ); - - // With Bilinear, case 2 - input[0] = -0.3; - input[1] = 0.3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], 0.0912, 0.0001 ) ); - } - - void test_evaluate_non_consecutive_layers() - { - NLR::NetworkLevelReasoner nlr; - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - nlr.addLayerDependency( 0, 1 ); - nlr.addLayerDependency( 1, 2 ); - nlr.addLayerDependency( 2, 3 ); - nlr.addLayerDependency( 0, 3 ); - nlr.addLayerDependency( 3, 4 ); - nlr.addLayerDependency( 0, 4 ); - nlr.addLayerDependency( 4, 5 ); - - // Set the weights and relus - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, 2 ); - nlr.setWeight( 2, 2, 3, 1, -2 ); - nlr.setWeight( 0, 1, 3, 1, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 0, 0, 4, 2 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 2, 5, 0, 1 ); - - // Evaluate - double input[2]; - double output; - - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); - TS_ASSERT( FloatUtils::areEqual( output, 2 ) ); - - input[0] = -1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, &output ) ); - TS_ASSERT( FloatUtils::areEqual( output, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.computeSuccessorLayers() ); - - TS_ASSERT_EQUALS( nlr.getLayer( 0 )->getSuccessorLayers(), 
Set( { 1, 3, 4 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 1 )->getSuccessorLayers(), Set( { 2 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 2 )->getSuccessorLayers(), Set( { 3 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 3 )->getSuccessorLayers(), Set( { 4 } ) ); - TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getSuccessorLayers(), Set( { 5 } ) ); - } - - void test_evaluate_abs_and_relu() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithAbsAndRelu( nlr ); - - double input[2]; - double output[2]; - - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); - - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - } - - void test_evaluate_round_and_sign() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithRoundAndSign( nlr ); - - double input[2]; - double output[2]; - - input[0] = 1.6; - input[1] = 1.4; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -2, 0.0001 ) ); - - input[0] = 1.6; - input[1] = 1.6; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -1, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); - } - - void test_evaluate_leaky_relu_and_sigmoid() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithLeakyReluAndSigmoid( nlr ); - - double input[2]; - double output[2]; - - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0.7109, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 1.4602, 0.0001 ) ); - - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( 
nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0.4013, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 0.6508, 0.0001 ) ); - } - - void test_evaluate_softmax_and_max() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSoftmaxAndMax( nlr ); - - double input[2]; - double output[1]; - - input[0] = 1; - input[1] = -3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -2.9998, 0.0001 ) ); - - input[0] = -3; - input[1] = 3; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], -1.0000, 0.0001 ) ); - } - - void test_evaluate_relu_and_bilinear() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithReluAndBilinear( nlr ); - - double input[2]; - double output[1]; - - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) ); - - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); - } - - void test_store_into_other() - { - NLR::NetworkLevelReasoner nlr; - - populateNetwork( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - // With ReLUs, Inputs are zeros, only biases count - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, 
output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // With ReLUs, case 1 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_sigmoids() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSigmoids( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_round() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithRound( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - 
TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_sign() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSign( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_abs() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithAbs( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], 
output2[1] ) ); - } - - void test_store_into_other_with_leaky_relu() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithLeakyRelu( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_max() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithMax( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_softmax() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSoftmax( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( 
nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_store_into_other_with_bilinear() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithBilinear( nlr ); - - NLR::NetworkLevelReasoner nlr2; - - TS_ASSERT_THROWS_NOTHING( nlr.storeIntoOther( nlr2 ) ); - - double input[2]; - double output1[2]; - double output2[2]; - - // case 1 - input[0] = 0; - input[1] = 0; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - - // case 2 - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output1 ) ); - TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); - - TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); - } - - void test_generate_input_query() - { - NLR::NetworkLevelReasoner nlr; - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( 
unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Variable indexing - - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 4 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - - // Set the weights and biases for the weighted sum layers - - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -5 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 1, 1, 0 ); - nlr.setBias( 1, 2, 0 ); - - nlr.setBias( 3, 0, 0 ); - nlr.setBias( 3, 1, 2 ); - - nlr.setBias( 5, 0, 0 ); - nlr.setBias( 5, 1, 0 ); - - // Mark the ReLU/Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Start the testing - Query ipq; - nlr.generateQuery( ipq ); - List unhandledEquations; - Set varsInUnhandledConstraints; - TS_ASSERT( - 
ipq.constructNetworkLevelReasoner( unhandledEquations, varsInUnhandledConstraints ) ); - NLR::NetworkLevelReasoner *reconstructedNlr = ipq.getNetworkLevelReasoner(); - - double input[2]; - double output[2]; - - input[0] = 1; - input[1] = 1; - - TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 2 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 2 ) ); - - input[0] = 1; - input[1] = 2; - - TS_ASSERT_THROWS_NOTHING( reconstructedNlr->evaluate( input, output ) ); - - TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); - TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); - } - - void test_simulate_relu() - { - NLR::NetworkLevelReasoner nlr; - - populateNetwork( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With ReLUs, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - - // With ReLUs, case 1 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 1 ) ); - } - - // 
With ReLUs, case 1 and 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, 1 ) ); - simulations3.append( Vector( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 0 ) ); - } - } - - void test_simulate_sigmoids() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSigmoids( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // case 1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.6750, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 3.0167, - 0.0001 ) ); - } - - // case 2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.6032, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.5790, - 0.0001 ) ); - } - - // case 3 - Vector> simulations3; - simulations3.append( 
Vector( simulationSize, 1 ) ); - simulations3.append( Vector( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.5045, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.1957, - 0.0001 ) ); - } - } - - void test_simulate_round() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithRound( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Round, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - - // With Round, case 1 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 2.1 ) ); - simulations2.append( Vector( simulationSize, 1.4 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 2, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -4, - 0.0001 ) ); - } - - // With Round, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, 
2.1 ) ); - simulations3.append( Vector( simulationSize, 1.6 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -12, - 0.0001 ) ); - } - } - - void test_simulate_sign() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSign( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Sign, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - - // With Sign, case 1 - Vector> simulations2; - simulations2.append( Vector( simulationSize, -2 ) ); - simulations2.append( Vector( simulationSize, -2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - - // With Sign, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, -1 ) ); - simulations3.append( Vector( simulationSize, 
1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -4 ) ); - } - } - - void test_simulate_abs() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithAbs( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Abs, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - - // With Abs, case 1 - Vector> simulations2; - simulations2.append( Vector( simulationSize, -2 ) ); - simulations2.append( Vector( simulationSize, -2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - - // With Abs, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, 1 ) ); - simulations3.append( Vector( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( 
unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 4 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 10 ) ); - } - } - - void test_simulate_leaky_relu() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithLeakyRelu( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Leaky ReLU, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - - // With Leaky ReLU, case 1 (alpha=0.1) - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.9, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 0.57, - 0.0001 ) ); - } - - // With Leaky ReLU, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, 1 ) ); - simulations3.append( Vector( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; 
i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -0.04, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -0.76, - 0.0001 ) ); - } - } - - void test_simulate_max() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithMax( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Max, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -3 ) ); - } - - // With Max, case 1 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, -3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -18 ) ); - } - - // With Max, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, -3 ) ); - simulations3.append( Vector( simulationSize, 3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -5 ) ); - } - } - - void test_simulate_softmax() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSoftmax( nlr ); - - unsigned simulationSize = 
Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Softmax, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.2999, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.4001, - 0.0001 ) ); - } - - // With Softmax, case 1 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, -3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.1192, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.7615, - 0.0001 ) ); - } - - // With Softmax, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, -3 ) ); - simulations3.append( Vector( simulationSize, 3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.1206, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2.7588, - 0.0001 ) ); - } - } - - void test_simulate_bilinear() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithBilinear( nlr ); - - 
unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Bilinear, Inputs are zeros, only biases count - Vector> simulations1; - simulations1.append( Vector( simulationSize, 0 ) ); - simulations1.append( Vector( simulationSize, 0 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); - } - - // With Bilinear, case 1 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 0.1 ) ); - simulations2.append( Vector( simulationSize, -0.3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 2.8304, - 0.0001 ) ); - } - - // With Bilinear, case 2 - Vector> simulations3; - simulations3.append( Vector( simulationSize, -0.3 ) ); - simulations3.append( Vector( simulationSize, 0.3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations3 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.0912, - 0.0001 ) ); - } - } - - void test_simulate_non_consecutive_layers() - { - NLR::NetworkLevelReasoner nlr; - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - nlr.addLayerDependency( 0, 1 ); - nlr.addLayerDependency( 1, 2 ); - nlr.addLayerDependency( 2, 3 ); - nlr.addLayerDependency( 0, 3 ); - 
nlr.addLayerDependency( 3, 4 ); - nlr.addLayerDependency( 0, 4 ); - nlr.addLayerDependency( 4, 5 ); - - // Set the weights and relus - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, 2 ); - nlr.setWeight( 2, 2, 3, 1, -2 ); - nlr.setWeight( 0, 1, 3, 1, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 0, 0, 4, 2 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 2, 5, 0, 1 ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // Simulate1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1 ) ); - simulations1.append( Vector( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 2 ) ); - - // Simulate2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, -1 ) ); - simulations2.append( Vector( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0 ) ); - } - - void test_simulate_abs_and_relu() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithAbsAndRelu( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // Simulate1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1 ) ); - simulations1.append( Vector( 
simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 2 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 2 ) ); - } - - // Simulate2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 4 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 4 ) ); - } - } - - void test_simulate_round_and_sign() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithRoundAndSign( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Round/Sign, case 1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1.6 ) ); - simulations1.append( Vector( simulationSize, 1.4 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -2 ) ); - } - - // With Round/Sign, case 2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1.6 ) ); - simulations2.append( Vector( simulationSize, 1.6 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 
) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -1 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - -4 ) ); - } - } - - void test_simulate_leaky_relu_and_sigmoid() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithLeakyReluAndSigmoid( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With LeakyReLU/Sigmoid, case 1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1 ) ); - simulations1.append( Vector( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.7109, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 1.4602, - 0.0001 ) ); - } - - // With LeakyReLU/Sigmoid, case 2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 0.4013, - 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 1 ) - .get( i ), - 0.6508, - 0.0001 ) ); - } - } - - void test_simulate_softmax_and_max() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithSoftmaxAndMax( nlr ); - - unsigned simulationSize = Options::get()->getInt( 
Options::NUMBER_OF_SIMULATIONS ); - - // With LeakyReLU/Sigmoid, case 1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1 ) ); - simulations1.append( Vector( simulationSize, -3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -2.9998, - 0.0001 ) ); - } - - // With LeakyReLU/Sigmoid, case 2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, -3 ) ); - simulations2.append( Vector( simulationSize, 3 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - -1, - 0.0001 ) ); - } - } - - void test_simulate_relu_and_bilinear() - { - NLR::NetworkLevelReasoner nlr; - - populateNetworkWithReluAndBilinear( nlr ); - - unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); - - // With Relu/Bilinear, case 1 - Vector> simulations1; - simulations1.append( Vector( simulationSize, 1 ) ); - simulations1.append( Vector( simulationSize, 1 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations1 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i ), - 1 ) ); - } - - // With ReLU/Bilinear, case 2 - Vector> simulations2; - simulations2.append( Vector( simulationSize, 1 ) ); - simulations2.append( Vector( simulationSize, 2 ) ); - - TS_ASSERT_THROWS_NOTHING( nlr.simulate( &simulations2 ) ); - - for ( unsigned i = 0; i < simulationSize; ++i ) - { - TS_ASSERT( FloatUtils::areEqual( - ( *( nlr.getLayer( nlr.getNumberOfLayers() - 1 )->getSimulations() ) ) - .get( 0 ) - .get( i 
), - 0 ) ); - } - } - - void test_interval_arithmetic_bound_propagation_relu_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetwork( nlr ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, 
Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - 
Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_abs_constraints() - { - NLR::NetworkLevelReasoner nlr; - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, 1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - 
nlr.setWeight( 4, 1, 5, 1, 3 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Layer dependenices - nlr.addLayerDependency( 0, 1 ); - nlr.addLayerDependency( 1, 2 ); - nlr.addLayerDependency( 2, 3 ); - nlr.addLayerDependency( 3, 4 ); - nlr.addLayerDependency( 4, 5 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 11 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 
7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 8, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), - - Tightening( 10, -3, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 41, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - 
tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), 
Tightening( 13, 56, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_sign_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithSign( nlr ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -1, Tightening::LB ), 
Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), - - Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, 3 ); - tableau.setUpperBound( 0, 4 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - 
TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, 4, Tightening::LB ), Tightening( 2, 5, Tightening::UB ), - Tightening( 3, 1, Tightening::LB ), Tightening( 3, 1, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 11, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, 1, Tightening::LB ), Tightening( 10, 3, Tightening::UB ), - Tightening( 11, 1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), - - Tightening( 12, 1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, 4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_leaky_relu_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithLeakyRelu( nlr ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, 
large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), - Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), - Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), - Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), - - Tightening( 9, -0.28, Tightening::LB ), Tightening( 9, 10.1, Tightening::UB ), - Tightening( 11, -0.38, Tightening::LB ), Tightening( 11, 9.1, Tightening::UB ), - - Tightening( 12, -0.28, Tightening::LB ), Tightening( 12, 10.1, Tightening::UB ), - Tightening( 13, -1.42, Tightening::LB ), Tightening( 13, 37.4, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 
2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), - Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), - - Tightening( 9, -0.34, Tightening::LB ), Tightening( 9, 7.1, Tightening::UB ), - Tightening( 11, -0.32, Tightening::LB ), Tightening( 11, 7.3, Tightening::UB ), - - Tightening( 12, -0.34, Tightening::LB ), Tightening( 12, 7.1, Tightening::UB ), - Tightening( 13, -1.3, 
Tightening::LB ), Tightening( 13, 29, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_round_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithRound( nlr ); - - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, 1.4 ); - tableau.setUpperBound( 0, 1.6 ); - tableau.setLowerBound( 1, -1.4 ); - tableau.setUpperBound( 1, 2.1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), - Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), - Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - - 
Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), - Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), - - Tightening( 9, -4, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), - Tightening( 11, -7, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - - Tightening( 12, -4, Tightening::LB ), Tightening( 12, 11, Tightening::UB ), - Tightening( 13, -25, Tightening::LB ), Tightening( 13, 35, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3.1 ); - tableau.setUpperBound( 0, 1.6 ); - tableau.setLowerBound( 1, 1.4 ); - tableau.setUpperBound( 1, 2.1 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - 
- // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), - Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), - Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), - Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - - Tightening( 9, -16, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 11, -15, Tightening::LB ), Tightening( 11, 2, Tightening::UB ), - - Tightening( 12, -16, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -61, Tightening::LB ), Tightening( 13, 7, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_sigmoid_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithSigmoids( nlr ); - - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - 
tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 0.5000, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), - Tightening( 5, 0.0067, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), - Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.7311, Tightening::UB ), - - Tightening( 8, -0.2244, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), - Tightening( 10, 0.3948, Tightening::LB ), Tightening( 10, 2.2244, Tightening::UB ), - - Tightening( 9, 0.4441, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), - Tightening( 11, 0.5974, Tightening::LB ), Tightening( 11, 0.9024, Tightening::UB ), - - Tightening( 12, 0.4441, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ), - Tightening( 13, 2.2364, Tightening::LB ), Tightening( 13, 3.5399, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - 
tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0.1192, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), - Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), - - Tightening( 8, -0.7616, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), - Tightening( 10, 0.2384, Tightening::LB ), Tightening( 10, 2.6052, Tightening::UB ), - - Tightening( 9, 0.3183, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), - Tightening( 11, 0.5593, Tightening::LB ), 
Tightening( 11, 0.9312, Tightening::UB ), - - Tightening( 12, 0.3183, Tightening::LB ), Tightening( 12, 0.8327, Tightening::UB ), - Tightening( 13, 1.9963, Tightening::LB ), Tightening( 13, 3.6263, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_max_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithMax( nlr ); - - - MockTableau tableau; - tableau.getBoundManager().initialize( 12 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, 
Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 10, Tightening::UB ), - Tightening( 9, -8, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), - - Tightening( 11, -10, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -8, Tightening::LB ), Tightening( 3, 9, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), 
Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 9, Tightening::UB ), - Tightening( 7, -5, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - - Tightening( 8, -5, Tightening::LB ), Tightening( 8, 16, Tightening::UB ), - Tightening( 9, -14, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -5, Tightening::LB ), Tightening( 10, 16, Tightening::UB ), - - Tightening( 11, -16, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_softmax_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithSoftmax( nlr ); - - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - 
tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), - Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), - - Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), - Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), - - Tightening( 9, 0.0240, Tightening::LB ), Tightening( 9, 0.8349, Tightening::UB ), - Tightening( 11, 0.1651, Tightening::LB ), Tightening( 11, 0.9759, Tightening::UB ), - - Tightening( 12, 0.0240, Tightening::LB ), Tightening( 12, 0.8349, Tightening::UB ), - Tightening( 13, 0.5192, Tightening::LB ), Tightening( 13, 3.7629, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, 
-large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), - - Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), - Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ), - - Tightening( 9, 0.0184, Tightening::LB ), Tightening( 9, 0.8678, Tightening::UB ), - Tightening( 11, 0.1322, Tightening::LB ), Tightening( 11, 0.9816, Tightening::UB ), - - Tightening( 12, 0.0184, Tightening::LB ), Tightening( 12, 0.8678, Tightening::UB ), - Tightening( 13, 0.4151, Tightening::LB ), Tightening( 13, 3.8125, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_bilinear_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithBilinear( nlr ); - 
- - MockTableau tableau; - tableau.getBoundManager().initialize( 12 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -0.1 ); - tableau.setUpperBound( 0, 0.1 ); - tableau.setLowerBound( 1, -0.1 ); - tableau.setUpperBound( 1, 0.1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0.9, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ), - Tightening( 3, -0.5, Tightening::LB ), Tightening( 3, 0.5, Tightening::UB ), - Tightening( 4, -0.3, Tightening::LB ), Tightening( 4, 0.3, Tightening::UB ), - Tightening( 5, -0.3, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), - - Tightening( 6, -0.55, Tightening::LB ), Tightening( 6, 0.55, Tightening::UB ), - Tightening( 7, -0.09, Tightening::LB ), Tightening( 7, 0.09, Tightening::UB ), - - Tightening( 8, 1.36, Tightening::LB ), Tightening( 8, 2.64, Tightening::UB ), - Tightening( 9, -0.64, Tightening::LB ), Tightening( 9, 0.64, Tightening::UB ), - - Tightening( 10, -1.6896, Tightening::LB ), Tightening( 10, 1.6896, Tightening::UB ), - - 
Tightening( 11, -1.6896, Tightening::LB ), Tightening( 11, 1.6896, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -0.3 ); - tableau.setUpperBound( 0, 0.1 ); - tableau.setLowerBound( 1, -0.1 ); - tableau.setUpperBound( 1, 0.2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, 0.7, Tightening::LB ), Tightening( 2, 1.1, Tightening::UB ), - Tightening( 3, -0.8, Tightening::LB ), Tightening( 3, 0.9, Tightening::UB ), - Tightening( 4, -0.5, Tightening::LB ), Tightening( 4, 0.5, Tightening::UB ), - Tightening( 5, -0.6, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), - - Tightening( 6, -0.88, Tightening::LB ), Tightening( 6, 0.99, Tightening::UB ), - Tightening( 7, -0.3, Tightening::LB ), Tightening( 7, 0.3, Tightening::UB ), - - Tightening( 8, 0.82, Tightening::LB ), Tightening( 8, 3.29, Tightening::UB ), - Tightening( 9, -1.29, Tightening::LB ), Tightening( 9, 1.18, 
Tightening::UB ), - - Tightening( 10, -4.2441, Tightening::LB ), Tightening( 10, 3.8822, Tightening::UB ), - - Tightening( 11, -3.8822, Tightening::LB ), Tightening( 11, 4.2441, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_abs_and_relu_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithAbsAndRelu( nlr ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, 
Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -5, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, 
-large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 12, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 14, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 14, Tightening::UB ), - - Tightening( 10, -10, Tightening::LB ), Tightening( 10, 14, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_round_and_sign_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithRoundAndSign( nlr ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, 1.4 ); - tableau.setUpperBound( 0, 1.6 ); - tableau.setLowerBound( 1, -1.4 ); - tableau.setUpperBound( 1, 2.1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, 
-large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), - Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), - Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - - Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), - Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), - - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), - - Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current 
bounds - tableau.setLowerBound( 0, -3.1 ); - tableau.setUpperBound( 0, 1.6 ); - tableau.setLowerBound( 1, 1.4 ); - tableau.setUpperBound( 1, 2.1 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), - Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), - Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), - Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 11, -1, 
Tightening::LB ), Tightening( 11, 1, Tightening::UB ), - - Tightening( 12, -1, Tightening::LB ), Tightening( 12, 1, Tightening::UB ), - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 4, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_leaky_relu_and_sigmoid_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithLeakyReluAndSigmoid( nlr ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 14 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, 
Tightening::UB ), - Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), - Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), - Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), - - Tightening( 9, 0.0573, Tightening::LB ), Tightening( 9, 0.9999, Tightening::UB ), - Tightening( 11, 0.0219, Tightening::LB ), Tightening( 11, 0.9999, Tightening::UB ), - - Tightening( 12, 0.0573, Tightening::LB ), Tightening( 12, 0.9999, Tightening::UB ), - Tightening( 13, 0.1229, Tightening::LB ), Tightening( 13, 3.9996, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - 
tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, -0.2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, -1.2, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), - Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), - - Tightening( 9, 0.0323, Tightening::LB ), Tightening( 9, 0.9992, Tightening::UB ), - Tightening( 11, 0.0392, Tightening::LB ), Tightening( 11, 0.9993, Tightening::UB ), - - Tightening( 12, 0.0323, Tightening::LB ), Tightening( 12, 0.9992, Tightening::UB ), - Tightening( 13, 0.1498, Tightening::LB ), Tightening( 13, 3.9972, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_softmax_and_max_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithSoftmaxAndMax( nlr ); - - - MockTableau tableau; - tableau.getBoundManager().initialize( 12 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large 
); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { Tightening( 2, 0, Tightening::LB ), - Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), - Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), - Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 0.0066, Tightening::LB ), - Tightening( 3, 0.9517, Tightening::UB ), - Tightening( 5, 0.0007, Tightening::LB ), - Tightening( 5, 0.9909, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), - Tightening( 7, 0.7297, Tightening::UB ), - - Tightening( 8, -0.7225, Tightening::LB ), - Tightening( 8, 1.9403, Tightening::UB ), - Tightening( 9, 0.3192, Tightening::LB ), - Tightening( 9, 2.9819, Tightening::UB ), - - Tightening( 10, 0.3192, Tightening::LB ), - Tightening( 10, 2.9819, Tightening::UB ), - - Tightening( 11, -2.9819, Tightening::LB ), - Tightening( 11, -0.3192, Tightening::UB ) } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - 
tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { Tightening( 2, -2, Tightening::LB ), - Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), - Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), - Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0.0009, Tightening::LB ), - Tightening( 3, 0.9526, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), - Tightening( 5, 0.9966, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), - Tightening( 7, 0.9820, Tightening::UB ), - - Tightening( 8, -0.9811, Tightening::LB ), - Tightening( 8, 1.9468, Tightening::UB ), - Tightening( 9, 0.0654, Tightening::LB ), - Tightening( 9, 2.9933, Tightening::UB ), - - Tightening( 10, 0.0654, Tightening::LB ), - Tightening( 10, 2.9933, Tightening::UB ), - - Tightening( 11, -2.9933, Tightening::LB ), - Tightening( 11, -0.0654, Tightening::UB ) } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, 
expectedBounds2 ) ); - } - - void test_interval_arithmetic_bound_propagation_relu_and_bilinear_constraints() - { - NLR::NetworkLevelReasoner nlr; - populateNetworkWithReluAndBilinear( nlr ); - - MockTableau tableau; - tableau.getBoundManager().initialize( 12 ); - - // Initialize the bounds - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - double large = 1000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - nlr.setTableau( &tableau ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, -1, 
Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -7, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), - - Tightening( 11, -49, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 
8, 7, Tightening::UB ), - Tightening( 9, -2, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -14, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), - - Tightening( 11, -49, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - } - - void test_sbt_relus_all_active() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTRelu( nlr, tableau ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 : [11, 27] - x2.ub = 2x0 + 3x1 : [11, 27] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - Both ReLUs active, bound survive through activations: - - x4.lb = 2x0 + 3x1 : [11, 27] - x4.ub = 2x0 + 3x1 : [11, 27] - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = x0 + 2x1 : [6, 16] - x6.ub = x0 + 2x1 : [6, 16] - */ - - List expectedBounds( { - Tightening( 2, 11, Tightening::LB ), - Tightening( 2, 27, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 11, Tightening::LB ), - Tightening( 4, 27, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, 6, Tightening::LB ), - Tightening( 6, 16, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_relus_active_and_inactive() - { - 
Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTRelu( nlr, tableau ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -30 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 30 : [-19, -3] - x2.ub = 2x0 + 3x1 - 30 : [-19, -3] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First ReLU is inactive, bounds get zeroed - Second ReLU is active, bounds surive the activation - - x4.lb = 0 - x4.ub = 0 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 : [-11, -5] - */ - - List expectedBounds( { - Tightening( 2, -19, Tightening::LB ), - Tightening( 2, -3, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 0, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, -5, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_relus_active_and_not_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTRelu( nlr, tableau ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // 
Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -15 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First ReLU is undecided, bound is concretized. - Coefficient: 12/(12--4) = 12/16 = 0.75 - Second ReLU is active, bounds surive the activation - - x4 range: [0, 12] - x4.lb = 0.75( 2x0 + 3x1 ) - 0.75 * 15 = 1.5x0 + 2.25x1 - 11.25 - x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = 0.5x0 + 1.25x1 - 11.25 - x6.ub = 0.5x0 + 1.25x1 - 8.25 - - x6 range: [2 + 1.25 - 11.25 = -8, 3 + 6.25 - 8.25 = 1] = [-8, 1] - */ - - List expectedBounds( { - Tightening( 2, -4, Tightening::LB ), - Tightening( 2, 12, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 12, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, 1, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_relus_active_and_externally_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTRelu( nlr, tableau ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
- nlr.setBias( 1, 0, -15 ); - - // However, one of the ReLU's variables has been eliminated - nlr.eliminateVariable( 2, -3 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First ReLU is inactive (set externally), bounds get zeroed - Second ReLU is active, bounds surive the activation - - x4.lb = 0 - x4.ub = 0 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 : [-11, -5] - */ - - List expectedBounds( { - // x2 does not appear, because it has been eliminated - - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 0, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, -5, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_relu_residual1() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTReluResidual1( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [-1, 1] - - Layer 1: - - x1.lb = x0 : [-1, 1] - x1.ub = x0 : [-1, 1] - - ReLU is undecided, bound is concretized. 
- Coefficient: 1/( 1--1 ) = 1/2 = 0.5 - - x2.lb = 0.5x0 - x2.ub = 0.5x0 + 0.5 - x2 range: [0, 1] - - Layer 2 (with residual from x0): - - x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1] - x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] - x3 range: [-1, 2.5] - - ReLU is undecided, bound is concretized. - Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. - - x4.lb = 0 - x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] - x4 range: [0, 2.5] - - Layer 3 (with residual from x1): - - x5.lb = 3 ( 0 ) + 3 ( x0 ) + 1 = 3x0 + 1 : [-2, 4] - x5.ub = 3 ( -15/14 x0 + 20/14 ) + 3 ( x0 ) + 1 = -3/14 x0 + 74/14 : [71/14, 77/14 = 5.5] - - x5 range: [-2, 4] - */ - - List expectedBounds( { - Tightening( 1, -1, Tightening::LB ), - Tightening( 1, 1, Tightening::UB ), - Tightening( 2, 0, Tightening::LB ), - Tightening( 2, 1, Tightening::UB ), - Tightening( 3, -1, Tightening::LB ), - Tightening( 3, 2.5, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 2.5, Tightening::UB ), - Tightening( 5, 2, Tightening::LB ), - Tightening( 5, 5.5, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_relu_residual2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTReluResidual2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [-1, 1] - - Layer 1: - - x1.lb = x0 : [-1, 1] - x1.ub = x0 : [-1, 1] - - ReLU is undecided, bound is concretized. 
- Coefficient: 1/( 1--1 ) = 1/2 = 0.5 - - x2.lb = 0.5x0 - x2.ub = 0.5x0 + 0.5 - x2 range: [0, 1] - - Layer 2 (with residual from x0): - - x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1] - x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] - x3 range: [-1, 2.5] - - ReLU is undecided, bound is concretized. - Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. - - x4.lb = 0 - x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] - x4 range: [0, 2.5] - - Layer 3 (with residual from x0): - - x5.lb = 3 ( 0 ) + 1 ( x0 ) + 1 = 1x0 + 1 : [0, 2] - x5.ub = 3 ( -15/14 x0 + 20/14 ) + 1 ( x0 ) + 1 = -31/14 x0 + 74/14 : [43/14, 105/14 - = 7.5] x5 range: [0, 7.5] - - Layer 4: - x6.lb = 1x0 + 1 : [0, 2] - x6.ub = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5] - x6 range: [0, 7.5] - */ - - List expectedBounds( { - Tightening( 1, -1, Tightening::LB ), - Tightening( 1, 1, Tightening::UB ), - Tightening( 2, 0, Tightening::LB ), - Tightening( 2, 1, Tightening::UB ), - Tightening( 3, -1, Tightening::LB ), - Tightening( 3, 2.5, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 2.5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), - Tightening( 5, 7.5, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), - Tightening( 6, 7.5, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_relu_reindex() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTReluReindex( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - 
- x0: [-1, 1] - x1: [-1, 1] - - Layer 1: - - x2.lb = x0 + x1 : [-2, 2] - x2.ub = x0 + x1 : [-2, 2] - - x3.lb = x0 - x1 : [-2, 2] - x3.ub = x0 - x1 : [-2, 2] - - Both ReLUs are undecided, bounds are concretized. - Coefficient: 2/( 2--2 ) = 2/4 = 0.5 - - x4.lb = 0.5 ( x0 + x1 ) = 0.5x0 + 0.5x1 - x4.ub = 0.5 ( x0 + x1 ) + 1 = 0.5x0 + 0.5x1 + 1 - x4 range: [0, 2] - - x5.lb = 0.5 ( x0 - x1 ) = 0.5x0 - 0.5x1 - x5.ub = 0.5 ( x0 - x1 ) + 1 = 0.5x0 - 0.5x1 + 1 - x5 range: [0, 2] - - Layer 2: - - x6.lb = 1 ( 0.5x0 + 0.5x1 ) + 1 ( 0.5x0 - 0.5x1 ) = x0 : [-1, 1] - x6.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) + 1 ( 0.5x0 - 0.5x1 + 1 ) = x0 + 2 : [1, 3] - x6 range: [-1, 3] - - x7.lb = 1 ( 0.5x0 + 0.5x1 ) - 1 ( 0.5x0 - 0.5x1 + 1 ) = x1 - 1 : [-2, 0] - x7.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) - 1 ( 0.5x0 - 0.5x1 ) = x1 + 1 : [0, 2] - x7 range: [-2, 2] - - Both ReLUs are undecided, bounds are concretized. - Coefficient (first ReLU, lower): 1/( 1--1 ) = 1/2 = 0.5 - Coefficient (first ReLU, upper): 1 (propagated as is) - Coefficient (second ReLU, lower): 0 (bound is zero) - Coefficient (second ReLU, upper): 2/( 2--2 ) = 2/4 = 0.5 - - x8.lb = 0.5 ( x0 ) = 0.5x0 - x8.ub = x0 + 2 - x8 range: [0, 3] - - x9.lb = 0 - x9.ub = 0.5 ( x1 + 1 ) + 1 = 0.5x1 + 1.5 - x9 range: [0, 2] - - Layer 3: - - x10.lb = 1 ( 0.5x0 ) + 1 ( 0 ) + 1 = 0.5x0 + 1 : [0.5, 1.5] - x10.ub = 1 ( x0 + 2 ) + 1 ( 0.5x1 + 1.5 ) + 1 = x0 + 0.5x1 + 4.5 : [3, 6] - x10 range: [0.5, 6] - - x11.lb = 0.5x1 - 0.5 - x11.ub = 0.5x1 + 1.5 - x11 range: [0, 2] - - */ - - List expectedBounds( - { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, 
0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), - - Tightening( 10, 0.5, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 2, Tightening::UB ) - - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_abs_all_positive() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large 
); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 : [11, 27] - x2.ub = 2x0 + 3x1 : [11, 27] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - Both absolute values positive, bound survive through activations: - - x4.lb = 2x0 + 3x1 : [11, 27] - x4.ub = 2x0 + 3x1 : [11, 27] - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = x0 + 2x1 : [6, 16] - x6.ub = x0 + 2x1 : [6, 16] - */ - - List expectedBounds( { - Tightening( 2, 11, Tightening::LB ), - Tightening( 2, 27, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 11, Tightening::LB ), - Tightening( 4, 27, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, 6, Tightening::LB ), - Tightening( 6, 16, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_abs_positive_and_negative() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, 
NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -30 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 30 : [-19, -3] - x2.ub = 2x0 + 3x1 - 30 : [-19, -3] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First absolute value is negative, bounds get flipped - Second absolute value is positive, bounds 
surive the activation - - x4.lb = -2x0 -3x1 + 30 : [3, 19] - x4.ub = -2x0 -3x1 + 30 : [3, 19] - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - 3x0 - 4x1 + 30 : [-8, 14] - x6.ub = - 3x0 - 4x1 + 30 : [-8, 14] - */ - - List expectedBounds( { - Tightening( 2, -19, Tightening::LB ), - Tightening( 2, -3, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 19, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, 14, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_absolute_values_positive_and_not_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 
); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -15 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First absolute value is undecided, bounds are concretized. 
- Second absolute value is active, bounds surive the activation - - x4 range: [0, 12] - x4.lb = 0 - x4.ub = 12 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 : [-11, -5] - x6.ub = - x0 - x1 + 12 : [ 1, 7] - - x6 range: [-11, 7] - */ - - List expectedBounds( { - Tightening( 2, -4, Tightening::LB ), - Tightening( 2, 12, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 12, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -11, Tightening::LB ), - Tightening( 6, 7, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_absolute_values_active_and_externally_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - 
nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. - nlr.setBias( 1, 0, -15 ); - - // However, the weighted sum variable has been eliminated - nlr.eliminateVariable( 2, -3 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2 is eliminated, everything set to -3 - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - Second absolute value is positive, bounds surive the activation - - x4: all set to 3 - - x5.lb = x0 + x1 : [5, 11] - x5.ub = x0 + x1 : [5, 11] - - Layer 2: - - x6.lb = - x0 - x1 + 3 : [-8, -2] - x6.ub = - x0 - x1 + 3 : [-8, -2] - */ - - List expectedBounds( { - // x2 does not appear, because it has been eliminated - - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 5, Tightening::LB ), - Tightening( 5, 11, Tightening::UB ), - - Tightening( 6, -8, Tightening::LB ), - Tightening( 6, -2, Tightening::UB ), - } ); - - List bounds; - 
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_signs_positive_and_not_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - 
tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0) - nlr.setBias( 1, 0, -15 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2.lb = 2x0 + 3x1 - 15 : [-4, 12] - x2.ub = 2x0 + 3x1 - 15 : [-4, 12] - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First sign is undecided, bounds are concretized. - Second sign is active, bounds become constant 1 - Coefficient (first Sign, lower): 2/12 = 1/6. - Coefficient (first Sign, upper): -2/-4 = 1/2. - - x4 range: [-1, 1] - x4.lb = 1/6 ( 2x0 + 3x1 - 15 ) - 1 = 2/6 x0 + 3/6 x1 - 21/6 - x4.ub = 1/2 ( 2x0 + 3x1 - 15 ) + 1 = x0 + 1.5x1 - 6.5 - - x5 range: [1, 1] - x5.lb = 1 - x5.ub = 1 - - Layer 2: - - x6.lb = 1 ( 2/6 x0 + 3/6 x1 - 21/6 ) - 1 ( 1 ) = 2/6 x0 + 3/6 x1 - 27/6 : [-16/6, 0] - x6.ub = 1 ( x0 + 1.5x1 - 6.5 ) - 1 ( 1 ) = x0 + 1.5x1 - 7.5 : [-2, 6] - - x6 range: [-8/3, 6] - */ - - List expectedBounds( { - Tightening( 2, -4, Tightening::LB ), - Tightening( 2, 12, Tightening::UB ), - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), - Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), - Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, -2.6667, Tightening::LB ), - Tightening( 6, 6, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_signs_active_and_externally_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - tableau.getBoundManager().initialize( 7 ); - nlr.setTableau( &tableau ); - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, 
NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 3; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Weights - nlr.setWeight( 0, 0, 1, 0, 2 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, 3 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 0, -1 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - - tableau.setLowerBound( 0, 4 ); - tableau.setUpperBound( 0, 6 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 5 ); - - // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. 
- nlr.setBias( 1, 0, -15 ); - - // However, the weighted sum variable has been eliminated - nlr.eliminateVariable( 2, -3 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [4, 6] - x1: [1, 5] - - Layer 1: - - x2 is eliminated, everything set to -3 - - x3.lb = x0 + x1 : [5, 11] - x3.ub = x0 + x1 : [5, 11] - - First sign is negative, bounds become constant -1 - Second sign is positive, bounds become constant 1 - - x4: all set to -1 - - x5: all set to 1 - - Layer 2: - - x6.lb = 1 ( -1 ) - 1 ( 1 ) = -2 - x6.ub = 1 ( -1 ) - 1 ( 1 ) = -2 - */ - - List expectedBounds( { - // x2 does not appear, because it has been eliminated - - Tightening( 3, 5, Tightening::LB ), - Tightening( 3, 11, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), - Tightening( 4, -1, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), - Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), - Tightening( 6, -2, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_leaky_relu() - { - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTLeakyReLU( nlr, tableau ); // alpha = 0.2 - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [-1, 1] - x1: [-1, 1] - - Layer 1: - - x2.lb = x0 + x1 : [-2, 2] - x2.ub = x0 + x1 : [-2, 2] - - x3.lb = x0 - x1 : [-2, 2] - x3.ub = x0 - x1 : [-2, 2] - - Both LeakyReLUs are undecided, bounds are concretized. 
- Coefficient: ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6 - Bias: ( 0.2 - 1 ) * 2 * -2 / ( 2--2 ) = 0.8 - - x4.lb = x0 + x1 - x4.ub = 0.6 ( x0 + x1 ) + 0.8 = 0.6x0 + 0.6x1 + 0.8 - x4 range: [-0.4, 2] - - x5.lb = x0 - x1 - x5.ub = 0.6 ( x0 - x1 ) + 0.8 = 0.6x0 - 0.6x1 + 0.8 - x5 range: [-0.4, 2] - - Layer 2: - - x6.lb = 1 ( x0 + x1 ) + 1 ( x0 - x1 ) = 2x0 : [-2, 2] - x6.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) + 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2x0 + 1.6 : [0.4, 2.8] - x6 range: [-2, 2.8] - - x7.lb = 1 ( x0 + x1 ) - 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] - x7.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) - 1 ( x0 - x1 ) = -0.4x0 + 1.6x1 + 0.8 : [-1.2, 2.8] - x7 range: [-2.8, 2.8] - - Both LeakyReLUs are undecided, bounds are concretized. - Coefficient (first LeakyReLU): ( 2.8 - 0.2*-2 )/( 2.8--2 ) = 3.2/4.8 = 10/15 - Bias (first LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2 / ( 2.8--2 ) = 14/15 - - Coefficient (second LeakyReLU): ( 2.8 - 0.2*-2.8 )/( 2.8--2.8 ) = 3.36/5.6 = 0.6 - Bias (second LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2.8 / ( 2.8--2.8 ) = 1.12 - - x8.lb = 2x0 - x8.ub = 10/15 ( 1.2x0 + 1.6 ) + 14/15 = 0.8x0 + 2 - x8 range: [-0.4, 2.8] - - x9.lb = 0.4x0 + 1.6x1 - 0.8 - x9.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 - x9 range: [-0.56, 2.8] - - Layer 3: - - x10.lb = 1 ( 0.4x0 + 1.6x1 - 0.8 ) + 1 ( 2x0 ) + 1 = 2.4x0 + 1.6x1 + 0.2 : [-3.8, 5.2] - x10.ub = 1 ( -0.24 x0 + 0.96 x1 + 1.6 ) + 1 ( 0.8x0 + 2 ) + 1 = 0.56x0 + 0.96x1 + 4.6 : - [3.08, 6.12] x10 range: [-3.8, 6.12] - - x11.lb = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] - x11.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 : [0.4, 2.8] - x11 range: [-2.8, 2.8] - - */ - - List expectedBounds( - { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -0.4, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -0.4, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), 
- - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2.8, Tightening::UB ), - Tightening( 7, -2.8, Tightening::LB ), Tightening( 7, 2.8, Tightening::UB ), - - Tightening( 8, -0.4, Tightening::LB ), Tightening( 8, 2.8, Tightening::UB ), - Tightening( 9, -0.56, Tightening::LB ), Tightening( 9, 2.8, Tightening::UB ), - - Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 6.12, Tightening::UB ), - Tightening( 11, -2.8, Tightening::LB ), Tightening( 11, 2.8, Tightening::UB ) - - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_sigmoids_and_round() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTSigmoidsAndRound( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - // Layer 1 - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 0 ), -2, 0.00001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 0 ), 2, 0.00001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getLb( 1 ), -2, 0.00001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 1 )->getUb( 1 ), 2, 0.00001 ) ); - - // Layer 2 - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 0 ), 0.1192, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 0 ), 0.8807, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getLb( 1 ), 0.1192, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 2 )->getUb( 1 ), 0.8807, 0.0001 ) ); - - // Layer 3 - /* - Double-check 
with Python - --- - from math import exp as e - def g(x): - return 1 / (1 + e(-x)) - - def g_prime(x): - return g(x) * (1 - g(x)) - - def lam(l, u): - return (g(u) - g(l)) / (u - l) - - def lam_prime(l, u): - return min(g_prime(l), g_prime(u)) - - l3 = l4 = -2 - u3 = u4 = 2 - l5 = l6 = g(-2) - u5 = u6 = g(2) - lambda7 = lam(l3, u3) - lambda7_prime = lam_prime(l3, u3) - lambda8 = lam(l4, u4) - lambda8_prime = lam_prime(l4, u4) - x7_l = lambda7_prime * (-2) + g(-2) + g(-2) - lambda7_prime * (-2 + -2) - x7_u = lambda7_prime * (2) + g(2) + g(2) -lambda7_prime * (2 + 2) - x8_l = lambda8_prime * (-2) + g(-2) - g(2) - lambda8_prime * (-2 - 2) - x8_u = lambda8_prime * (2) + g(2) - g(-2) -lambda8_prime * (2 - -2) - print(x7_l) - print(x7_u) - print(x8_l) - print(x8_u) - --- - [output]: - 0.4483930148512481 - 1.5516069851487517 - -0.5516069851487517 - 0.5516069851487517 - */ - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 0 ), 0.4483, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 0 ), 1.5516, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getLb( 1 ), -0.5516, 0.0001 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 3 )->getUb( 1 ), 0.5516, 0.0001 ) ); - - // Layer 4 - TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 0 ), 0 ); - TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 0 ), 2 ); - TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 1 ), -1 ); - TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 1 ), 1 ); - } - - void test_sbt_max_not_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTMax( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - 
Input ranges: - - x0: [-1, 1] - x1: [-1, 2] - - Layer 1: - - x2.lb = x0 + x1 : [-2, 3] - x2.ub = x0 + x1 : [-2, 3] - - x3.lb = x0 - x1 : [-3, 2] - x3.ub = x0 - x1 : [-3, 2] - - Both ReLUs are undecided, bounds are concretized. - Coefficient (first ReLU): 3/( 3--2 ) = 3/5 = 0.6 - Coefficient (second ReLU): 2/( 2--3 ) = 2/5 = 0.4 - - x4.lb = 0.6 ( x0 + x1 ) = 0.6x0 + 0.6x1 - x4.ub = 0.6 ( x0 + x1 ) + 1.2 = 0.6x0 + 0.6x1 + 1.2 - x4 range: [0, 3] - - x5.lb = 0.4 ( x0 - x1 ) = 0.4x0 + 0.4x1 - x5.ub = 0.4 ( x0 - x1 ) + 1.2 = 0.4x0 + 0.4x1 + 1.2 - x5 range: [0, 2] - - Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub - Max inherits lower bound from x4, and its upper bound is constant 3. - - x6.lb = 0.6x0 + 0.6x1 : [-1.2, 1.8] - x6.ub = 3 : [3, 3] - x6 range: [-1.2, 3] - - Layer 3: - - x7.lb = 2 ( 0.6x0 + 0.6x1 ) = 1.2x0 + 1.8x1 : [-2.4, 3.6] - x7.ub = 2 ( 3 ) = 6 : [6, 6] - x7 range: [-2.4, 6] - */ - - List expectedBounds( { - Tightening( 2, -2, Tightening::LB ), - Tightening( 2, 3, Tightening::UB ), - Tightening( 3, -3, Tightening::LB ), - Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), - Tightening( 5, 2, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), - Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -2.4, Tightening::LB ), - Tightening( 7, 6, Tightening::UB ), - - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_max_fixed() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTMax( nlr, tableau ); - - tableau.setLowerBound( 0, 1 ); - tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -3 ); - tableau.setUpperBound( 1, -2 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [1, 2] - x1: [-3, -2] - - Layer 1: - - x2.lb = x0 + x1 : [-2, 0] - x2.ub = x0 + x1 : [-2, 0] - - x3.lb = x0 - x1 : [3, 5] - x3.ub = x0 - x1 : [3, 5] - - First ReLU is negative, bounds become constant 0 - Second ReLU is positive, bounds survive the activation - - x4: all set to 0 - - x5.lb = x0 - x1 : [3, 5] - x5.ub = x0 - x1 : [3, 5] - - Max is fixed because x5.lb > x4.ub, it inherits x5's bounds - - x6.lb = x0 - x1 : [3, 5] - x6.ub = x0 - x1 : [3, 5] - - Layer 3: - - x7.lb = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] - x7.ub = 2 ( x0 - x1 ) = 2x0 - 2x1 : [6, 10] - */ - - List expectedBounds( { - Tightening( 2, -2, Tightening::LB ), - Tightening( 2, 0, Tightening::UB ), - Tightening( 3, 3, Tightening::LB ), - Tightening( 3, 5, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), - Tightening( 4, 0, Tightening::UB ), - Tightening( 5, 3, Tightening::LB ), - Tightening( 5, 5, Tightening::UB ), - Tightening( 6, 3, Tightening::LB ), - Tightening( 6, 5, Tightening::UB ), - Tightening( 7, 6, Tightening::LB ), - Tightening( 7, 10, Tightening::UB ), - - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_sbt_softmax1() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTSoftmax( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - } - - void test_sbt_softmax2() - { - Options::get()->setString( 
Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - { - Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTSoftmax( nlr, tableau ); - - tableau.setLowerBound( 0, 1 ); - tableau.setUpperBound( 0, 1.000001 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 1.000001 ); - tableau.setLowerBound( 2, 1 ); - tableau.setUpperBound( 2, 1.000001 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [1, 1.0001] - x1: [1, 1.0001] - x2: [1, 1.0001] - */ - List expectedBounds( { Tightening( 3, 2, Tightening::LB ), - Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), - Tightening( 5, 0, Tightening::UB ), - Tightening( 6, 0.2595, Tightening::LB ), - Tightening( 6, 0.2595, Tightening::UB ), - Tightening( 7, 0.7054, Tightening::LB ), - Tightening( 7, 0.7054, Tightening::UB ), - Tightening( 8, 0.0351, Tightening::LB ), - Tightening( 8, 0.0351, Tightening::UB ), - Tightening( 9, 1, Tightening::LB ), - Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), - Tightening( 10, -1, Tightening::UB ) - - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - { - Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "er" ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTSoftmax( nlr, tableau ); - - tableau.setLowerBound( 0, 1 ); - tableau.setUpperBound( 0, 1.000001 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 1.000001 ); - tableau.setLowerBound( 2, 1 ); - tableau.setUpperBound( 2, 1.000001 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [1, 1.0001] - x1: [1, 1.0001] - x2: [1, 1.0001] - */ - List expectedBounds( { Tightening( 3, 2, Tightening::LB ), - Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), - Tightening( 5, 0, Tightening::UB ), - Tightening( 6, 0.2595, Tightening::LB ), - Tightening( 6, 0.2595, Tightening::UB ), - Tightening( 7, 0.7054, Tightening::LB ), - Tightening( 7, 0.7054, Tightening::UB ), - Tightening( 8, 0.0351, Tightening::LB ), - Tightening( 8, 0.0351, Tightening::UB ), - Tightening( 9, 1, Tightening::LB ), - Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), - Tightening( 10, -1, Tightening::UB ) - - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - } - - void test_sbt_softmax3() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTSoftmax2( nlr, tableau ); - - tableau.setLowerBound( 0, 1 ); - tableau.setUpperBound( 0, 1.00001 ); - tableau.setLowerBound( 1, 1 ); - tableau.setUpperBound( 1, 1.00001 ); - tableau.setLowerBound( 2, 1 ); - tableau.setUpperBound( 2, 1.00001 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [1, 1.0001] - x1: [1, 1.0001] - x2: [1, 1.0001] - */ - - List expectedBounds( - { Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, 
Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ), - Tightening( 8, 0.8668, Tightening::LB ), Tightening( 8, 0.8668, Tightening::UB ), - Tightening( 9, 0.9820, Tightening::LB ), Tightening( 9, 0.9820, Tightening::UB ), - Tightening( 10, 0.1173, Tightening::LB ), Tightening( 10, 0.1173, Tightening::UB ), - Tightening( 11, 0.0179, Tightening::LB ), Tightening( 11, 0.0179, Tightening::UB ), - Tightening( 12, 0.0159, Tightening::LB ), Tightening( 12, 0.0159, Tightening::UB ), - Tightening( 13, 0.9470, Tightening::LB ), Tightening( 13, 0.9470, Tightening::UB ), - Tightening( 14, -0.9470, Tightening::LB ), Tightening( 14, -0.9470, Tightening::UB ), - Tightening( 15, 1.0253, Tightening::LB ), Tightening( 15, 1.0253, Tightening::UB ), - Tightening( 16, -1.0253, Tightening::LB ), Tightening( 16, -1.0253, Tightening::UB ) - - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_softmax_bounds_er() - { - Vector inputLb = { -1, 0, 1 }; - Vector inputUb = { 0, 2, 4 }; - Vector input = { -0.5, 1, 2.5 }; - - double value = NLR::DeepPolySoftmaxElement::ERLowerBound( input, inputLb, inputUb, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.0114799, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.00563867, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERLowerBound( input, inputLb, inputUb, 0, 1 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.000838421, 0.00001 ) ); - - - Vector outputLb = { 0.2, 0, 0 }; - Vector outputUb = { 0.4, 0.1, 0.1 }; - - value = NLR::DeepPolySoftmaxElement::ERUpperBound( input, outputLb, outputUb, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, -1.44538, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 1.96538, 
0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dERUpperBound( input, outputLb, outputUb, 0, 1 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.358535, 0.00001 ) ); - } - - void test_softmax_bounds_lse1() - { - Vector inputLb = { -1, 0, 1 }; - Vector inputUb = { 0, 2, 3 }; - Vector input = { -0.5, 1, 2 }; - double value = NLR::DeepPolySoftmaxElement::LSELowerBound( input, inputLb, inputUb, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.0365, 0.001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSELowerBound( input, inputLb, inputUb, 0, 1 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.00703444, 0.001 ) ); - - Vector outputLb = { 0.2, 0, 0 }; - Vector outputUb = { 0.4, 0.1, 0.1 }; - value = NLR::DeepPolySoftmaxElement::LSEUpperBound( input, outputLb, outputUb, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.164165, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 0 ); - TS_ASSERT( FloatUtils::areEqual( value, 0.272204, 0.00001 ) ); - value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 1 ); - TS_ASSERT( FloatUtils::areEqual( value, -0.073207, 0.00001 ) ); - } - - void test_sbt_bilinear() - { - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkSBTBilinear( nlr, tableau ); - - tableau.setLowerBound( 0, 1 ); - tableau.setUpperBound( 0, 2 ); - tableau.setLowerBound( 1, -2 ); - tableau.setUpperBound( 1, 1 ); - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); - - /* - Input ranges: - - x0: [1, 2] - x1: [-2, 1] - - Layer 1: - - x2.lb = x0 - 2x1 : [-1, 6] - x2.ub = x0 - 2x1 : [-1, 6] - - x3.lb = x0 + x1 : [-1, 3] - x3.ub = x0 + x1 : [-1, 3] - - Coefficients for bilinear layer: - Lower bound: - alpha_l = x3.lb = 
-1 - beta = x2.lb = -1 - gamma_l = -x2.lb * x3.lb = --1 * -1 = -1 - - Upper bound: - alpha_u = x3.ub = 3 - beta = x2.lb = -1 - gamma_u = -x2.lb * x3.ub = --1 * 3 = 3 - - x4.lb = -1 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + -1 = -2x0 + x1 - 1 : [-7, -2] - x4.ub = 3 ( x0 - 2x1 ) + -1 ( x0 + x1 ) + 3 = 2x0 - 7x1 + 3 : [0, 21] - x4 range: [-7, 21] - - Layer 3: - - x7.lb = -1 ( 2x0 - 5x1 + 3 ) = -2x0 + 7x1 - 3 : [-21, 0] - x7.ub = -1 ( -2x0 + 3x1 - 1 ) = 2x0 + x1 + 1 : [2, 7] - x4 range: [-21, 5] - */ - - List expectedBounds( { Tightening( 2, -1, Tightening::LB ), - Tightening( 2, 6, Tightening::UB ), - Tightening( 3, -1, Tightening::LB ), - Tightening( 3, 3, Tightening::UB ), - Tightening( 4, -7, Tightening::LB ), - Tightening( 4, 21, Tightening::UB ), - Tightening( 5, -21, Tightening::LB ), - Tightening( 5, 7, Tightening::UB ) } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - } - - void test_concretize_input_assignment() - { - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - - populateNetwork( nlr ); - - // With ReLUs, Inputs are zeros, only biases count - tableau.nextValues[0] = 0; - tableau.nextValues[1] = 0; - - Map assignment; - - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); - - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 4 ) ); - - TS_ASSERT( assignment.size() == 14 ); - TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( assignment[13], 4 ) ); - - // With ReLUs, case 1 - tableau.nextValues[0] = 1; - tableau.nextValues[1] = 1; - - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); - - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 1 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 1 ) ); - - 
TS_ASSERT( FloatUtils::areEqual( assignment[12], 1 ) ); - TS_ASSERT( FloatUtils::areEqual( assignment[13], 1 ) ); - - // With ReLUs, case 2 - tableau.nextValues[0] = 1; - tableau.nextValues[1] = 2; - - TS_ASSERT_THROWS_NOTHING( nlr.concretizeInputAssignment( assignment ) ); - - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 0 ), 0 ) ); - TS_ASSERT( FloatUtils::areEqual( nlr.getLayer( 5 )->getAssignment( 1 ), 0 ) ); - - TS_ASSERT( FloatUtils::areEqual( assignment[12], 0 ) ); - TS_ASSERT( FloatUtils::areEqual( assignment[13], 0 ) ); - } - - - void test_obtain_bound_from_ipq() - { - NLR::NetworkLevelReasoner nlr; - populateNetwork( nlr ); - - Query query; - query.setNumberOfVariables( 14 ); - - - // Initialize the bounds - query.setLowerBound( 0, -1 ); - query.setUpperBound( 0, 1 ); - query.setLowerBound( 1, -1 ); - query.setUpperBound( 1, 1 ); - - double large = 1000; - query.setLowerBound( 2, -large ); - query.setUpperBound( 2, large ); - query.setLowerBound( 3, -large ); - query.setUpperBound( 3, large ); - query.setLowerBound( 4, -large ); - query.setUpperBound( 4, large ); - query.setLowerBound( 5, -large ); - query.setUpperBound( 5, large ); - query.setLowerBound( 6, -large ); - query.setUpperBound( 6, large ); - query.setLowerBound( 7, -large ); - query.setUpperBound( 7, large ); - query.setLowerBound( 8, -large ); - query.setUpperBound( 8, large ); - query.setLowerBound( 9, -large ); - query.setUpperBound( 9, large ); - query.setLowerBound( 10, -large ); - query.setUpperBound( 10, large ); - query.setLowerBound( 11, -large ); - query.setUpperBound( 11, large ); - query.setLowerBound( 12, -large ); - query.setUpperBound( 12, large ); - query.setLowerBound( 13, -large ); - query.setUpperBound( 13, large ); - - // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds( query ) ); - - // Perform the tightening pass - TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - - List expectedBounds( { - Tightening( 2, 0, 
Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 11, 0, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), - - Tightening( 12, 0, Tightening::LB ), Tightening( 12, 7, Tightening::UB ), - Tightening( 13, 0, Tightening::LB ), Tightening( 13, 28, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - - TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); - for ( const auto &bound : expectedBounds ) - TS_ASSERT( bounds.exists( bound ) ); - } - - void test_backwards_relu() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReLU( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, 
Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), - Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 8, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, 
large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), - - Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 8, 0, Tightening::LB ), - Tightening( 9, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_relu2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReLU2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 
); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), - - Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), - Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), - Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), - - Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), - - Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), - - Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP 
propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 14, 0, Tightening::LB ), - Tightening( 15, 0, Tightening::LB ), - Tightening( 16, 0, Tightening::LB ), - - Tightening( 19, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - 
tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), - - Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), - Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), - Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), - - Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), - Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), - Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), - - Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( 
bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 7, 0, Tightening::LB ), - - Tightening( 14, 0, Tightening::LB ), - Tightening( 15, 0, Tightening::LB ), - Tightening( 16, 0, Tightening::LB ), - - Tightening( 20, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_sigmoid() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSigmoid( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ), - Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ), - - Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ), - Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ), - - Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ), - Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ), - - Tightening( 10, -1.1819, 
Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ), - Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, 0.0066, Tightening::LB ), 
Tightening( 4, 0.8807, Tightening::UB ), - Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ), - - Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ), - Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ), - - Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ), - Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ), - - Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ), - Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_sigmoid2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSigmoid2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), 
Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), - Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ), - Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ), - Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ), - - Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ), - Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ), - Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ), - - Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ), - Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ), - Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ), - - Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ), - Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ), - - Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ), - Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ), - - Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, 
expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, 
Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ), - Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ), - Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ), - - Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ), - Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ), - Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ), - - Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ), - Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ), - Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ), - - Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ), - Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ), - - Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ), - Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ), - - Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, 
expectedBounds4 ) ); - } - - void test_backwards_abs() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardAbs( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), - - Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - 
tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ), - - Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ), - Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ), - - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke 
backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_abs2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardAbs2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), - Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), - - Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ), - 
Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), - Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), - - Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), - - Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - 
tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ), - - Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ), - Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ), - - Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ), - Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ), - Tightening( 16, 0, 
Tightening::LB ), Tightening( 16, 15, Tightening::UB ), - - Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ), - - Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_round() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardRound( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), - 
Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 11, -4, Tightening::LB ), - Tightening( 11, 6, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), - Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ), - - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), - - Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 7, -10, Tightening::LB ), - Tightening( 7, 5, Tightening::UB ), - - Tightening( 11, -14, Tightening::LB ), - Tightening( 11, 11, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_round2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardRound2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - 
tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ), - Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ), - - Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ), - Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), - Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ), - - Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ), - Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ), - - Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ), - Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ), - - Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - 
TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 
21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ), - Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), - Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), - - Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ), - Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), - Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), - - Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ), - Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ), - - Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), - Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), - - Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() 
); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_sign() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSign( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() 
); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - Tightening( 11, -1, 
Tightening::LB ), Tightening( 11, 5, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_sign2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSign2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), - Tightening( 12, -4, 
Tightening::LB ), Tightening( 12, 4, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), - - Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), - Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), - Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), - - Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), - Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), - - Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), - Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), - - Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); 
- tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), - Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, 
Tightening::UB ), - - Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), - Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), - Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), - - Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), - Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), - - Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), - Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), - - Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_leaky_relu() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardLeakyReLU( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -1, 
Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 4, -0.1, Tightening::LB ), - - Tightening( 8, -0.045, Tightening::LB ), - Tightening( 9, -0.3, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - 
tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), - Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), - - Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), - Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), - - Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), - Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 4, -0.5, Tightening::LB ), - Tightening( 5, -0.4, Tightening::LB ), - - Tightening( 8, -0.4571, Tightening::LB ), - Tightening( 9, -1.1057, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_leaky_relu2() - { - Options::get()->setString( 
Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardLeakyRelu2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), - Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), - Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), - - Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), - Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), - Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), - - Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), - Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), - - Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, 
Tightening::UB ), - Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), - - Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 7, -0.2, Tightening::LB ), - Tightening( 8, -0.3, Tightening::LB ), - Tightening( 9, -0.3, Tightening::LB ), - Tightening( 10, -0.6, Tightening::LB ), - - Tightening( 14, -0.9, Tightening::LB ), - Tightening( 15, -0.89, Tightening::LB ), - Tightening( 16, -0.77, Tightening::LB ), - - Tightening( 19, -2.3133, Tightening::LB ), - Tightening( 20, -1.2, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 
11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), - Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), - Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), - - Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), - Tightening( 15, -21.7714, Tightening::LB ), Tightening( 
15, 13.6818, Tightening::UB ), - Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), - - Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), - Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), - - Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), - Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), - - Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 7, -0.2, Tightening::LB ), - Tightening( 8, -0.5, Tightening::LB ), - Tightening( 9, -0.6, Tightening::LB ), - Tightening( 10, -1.5, Tightening::LB ), - - Tightening( 14, -1.1, Tightening::LB ), - Tightening( 15, -2.1771, Tightening::LB ), - Tightening( 16, -1.15, Tightening::LB ), - - Tightening( 19, -5.6259, Tightening::LB ), - Tightening( 20, -1.9, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_softmax_and_max() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSoftmaxAndMax( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke SBT - 
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ), - Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ), - Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ), - - Tightening( 8, -0.1519, Tightening::LB ), Tightening( 8, 1.4775, Tightening::UB ), - Tightening( 9, 0.6614, Tightening::LB ), Tightening( 9, 2.3899, Tightening::UB ), - - Tightening( 10, 0.6614, Tightening::LB ), Tightening( 10, 2.3899, Tightening::UB ), - - Tightening( 11, -2.3899, Tightening::LB ), Tightening( 11, -0.6614, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - 
tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), - - Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), - Tightening( 9, 0.0674, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ), - - Tightening( 10, 0.0674, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ), - - Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.0674, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void 
test_backwards_softmax_and_max2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSoftmaxAndMax2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ), - Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ), - Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ), - Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ), - - Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9635, Tightening::UB ), - Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9806, Tightening::UB ), - Tightening( 13, -2.9902, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ), - - Tightening( 14, 0.1086, Tightening::LB ), Tightening( 14, 0.9971, Tightening::UB ), - Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8766, Tightening::UB ), - Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4595, Tightening::UB ), - - Tightening( 17, 0.1086, Tightening::LB ), Tightening( 17, 0.9971, Tightening::UB ), - } ); - - List 
bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); 
- TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ), - - Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9989, Tightening::UB ), - Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ), - Tightening( 13, -2.9989, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ), - - Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9972, Tightening::UB ), - Tightening( 15, 0.0024, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ), - Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ), - - Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9972, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_relu_and_bilinear() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( 
Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReluAndBilinear( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ), - - Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ), - - Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - 
tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), - Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), - - Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), - - Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() 
); - - List expectedBounds4( { - Tightening( 7, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_relu_and_bilinear2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReluAndBilinear2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ), - Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), - - Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( 
nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 13, 0, Tightening::LB ), - Tightening( 14, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, 
Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), - - Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), - Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), - - Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 7, 0, Tightening::LB ), - - Tightening( 13, 0, Tightening::LB ), - Tightening( 14, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_preimage_approximation_leaky_relu() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-preimage-approx" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - 
populateNetworkBackwardLeakyReLU( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - /*List expectedBounds2( - { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2.1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 
0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.35, Tightening::UB ), - Tightening( 11, 1.35, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );*/ - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), 
- Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), - - Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), - Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), - - Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), - Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - /*List expectedBounds4( - { Tightening( 4, -0.5, Tightening::LB ), - Tightening( 5, -0.4, Tightening::LB ), - - Tightening( 8, -0.4571, Tightening::LB ), - Tightening( 9, -1.1057, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );*/ - } - - bool boundsEqual( const List &bounds, const List &expectedBounds ) - { - if ( bounds.size() != expectedBounds.size() ) - return false; - - bool allFound = true; - for ( const auto &bound : bounds ) - { - bool currentFound = false; - for ( const auto &expectedBound : expectedBounds ) - { - currentFound |= - ( bound._type == expectedBound._type && - bound._variable == expectedBound._variable && - FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) ); - } - allFound &= currentFound; - } - return allFound; - } - - void updateTableau( MockTableau &tableau, List &tightenings ) - { - ASSERT( tableau ); - for ( const auto &tightening : tightenings ) - { - if ( tightening._type == Tightening::LB ) - { - tableau.setLowerBound( tightening._variable, tightening._value ); - } - - if ( tightening._type == Tightening::UB ) - { - tableau.setUpperBound( 
tightening._variable, tightening._value ); - } - } - } -}; From 8d0bf198ba3b5eb45947e3a589a0bc25118c0f2c Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 12 Dec 2024 20:56:50 +0200 Subject: [PATCH 31/72] Delete src/nlr/GlobalConfiguration.h --- src/nlr/GlobalConfiguration.h | 315 ---------------------------------- 1 file changed, 315 deletions(-) delete mode 100644 src/nlr/GlobalConfiguration.h diff --git a/src/nlr/GlobalConfiguration.h b/src/nlr/GlobalConfiguration.h deleted file mode 100644 index 78ab9f662e..0000000000 --- a/src/nlr/GlobalConfiguration.h +++ /dev/null @@ -1,315 +0,0 @@ -/********************* */ -/*! \file GlobalConfiguration.h - ** \verbatim - ** Top contributors (to current version): - ** Guy Katz, Parth Shah, Derek Huang - ** This file is part of the Marabou project. - ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS - ** in the top-level source directory) and their institutional affiliations. - ** All rights reserved. See the file COPYING in the top-level source - ** directory for licensing information.\endverbatim - ** - ** [[ Add lengthier description here ]] - - **/ - -#ifndef __GlobalConfiguration_h__ -#define __GlobalConfiguration_h__ - -#include "DivideStrategy.h" - -class GlobalConfiguration -{ -public: - static void print(); - - // The exponential moving average is calculated as - // ema = current * alpha + previous * (1 - alpha) - static const double EXPONENTIAL_MOVING_AVERAGE_ALPHA; - - // Whether to use SoI instead of Reluplex for local search for satisfying assignments - // to non-linear constraint. - static bool USE_DEEPSOI_LOCAL_SEARCH; - - // The quantity by which the score is bumped up for PLContraints not - // participating in the SoI. This promotes those constraints in the branching - // order. - static const double SCORE_BUMP_FOR_PL_CONSTRAINTS_NOT_IN_SOI; - - // Use the polarity metrics to decide which branch to take first in a case split - // and how to repair a ReLU constraint. 
- static const bool USE_POLARITY_BASED_DIRECTION_HEURISTICS; - - // The default epsilon used for comparing doubles - static const double DEFAULT_EPSILON_FOR_COMPARISONS; - - // The precision level when convering doubles to strings - static const unsigned DEFAULT_DOUBLE_TO_STRING_PRECISION; - - // How often should the main loop print statistics? - static const unsigned STATISTICS_PRINTING_FREQUENCY; - static const unsigned STATISTICS_PRINTING_FREQUENCY_GUROBI; - - // Tolerance when checking whether the value computed for a basic variable is out of bounds - static const double BOUND_COMPARISON_ADDITIVE_TOLERANCE; - static const double BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE; - - // Tolerance when checking whether a basic variable depends on a non-basic variable, by looking - // at the change column, as part of a pivot operation. - static const double PIVOT_CHANGE_COLUMN_TOLERANCE; - - // Tolerance for the difference when computing the pivot entry by column and by row - static const double PIVOT_ROW_AND_COLUMN_TOLERANCE; - - // Tolerance when checking whether a non-basic variable is eligible for being selected as the - // entering variable, by its reduced cost - static const double ENTRY_ELIGIBILITY_TOLERANCE; - - // Ratio test tolerance constants - static const double RATIO_CONSTRAINT_ADDITIVE_TOLERANCE; - static const double RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE; - static const double HARRIS_RATIO_CONSTRAINT_ADDITIVE_TOLERANCE; - static const double HARRIS_RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE; - - // Cost function tolerance constants - static const double BASIC_COSTS_ADDITIVE_TOLERANCE; - static const double BASIC_COSTS_MULTIPLICATIVE_TOLERANCE; - - // Sparse ForrestTomlin diagonal element tolerance constant - static const double SPARSE_FORREST_TOMLIN_DIAGONAL_ELEMENT_TOLERANCE; - - // Toggle use of Harris' two-pass ratio test for selecting the leaving variable - static const bool USE_HARRIS_RATIO_TEST; - - // Toggle query-preprocessing on/off. 
- static const bool PREPROCESS_INPUT_QUERY; - - // Assuming the preprocessor is on, toggle whether or not it will attempt to perform variable - // elimination. - static const bool PREPROCESSOR_ELIMINATE_VARIABLES; - - // Toggle whether or not PL/NL constraints will be called upon - // to add auxiliary variables and equations after preprocessing. - static const bool PL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING; - static const bool NL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING; - - // If the difference between a variable's lower and upper bounds is smaller than this - // threshold, the preprocessor will treat it as fixed. - static const double PREPROCESSOR_ALMOST_FIXED_THRESHOLD; - - // Maximal rounds of tightening to perform in the preprocessor to avoid non-termination. - static const unsigned PREPROCESSSING_MAX_TIGHTEING_ROUND; - - // Try to set the initial tableau assignment to an assignment that is legal with - // respect to the input network. - static const bool WARM_START; - - // The maximal number of iterations without new tree states being visited, before - // the engine performs a precision restoration. - static const unsigned MAX_ITERATIONS_WITHOUT_PROGRESS; - - // How often should the main loop check the current degradation? - static const unsigned DEGRADATION_CHECKING_FREQUENCY; - - // The threshold of degradation above which restoration is required - static const double DEGRADATION_THRESHOLD; - - // If a pivot element in a simplex iteration is smaller than this threshold, the engine will - // attempt to pick another element. - static const double ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD; - - // If true, column-merging equations are given special treatment and cause columns in the - // tableau to be merged (instead of a new row added). 
- static const bool USE_COLUMN_MERGING_EQUATIONS; - - // If a pivot element in a Gaussian elimination iteration is smaller than this threshold times - // the largest element in the column, the elimination engine will attempt to pick another pivot. - static const double GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD; - - // How many potential pivots should the engine inspect (at most) in every simplex iteration? - static const unsigned MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS; - - static const DivideStrategy SPLITTING_HEURISTICS; - - // The frequency to use interval splitting when largest interval splitting strategy is in use. - static const unsigned INTERVAL_SPLITTING_FREQUENCY; - - // When automatically deciding which splitting strategy to use, we use relu-splitting if - // the number of inputs is larger than this number. - static const unsigned INTERVAL_SPLITTING_THRESHOLD; - - // How often should we perform full bound tightening, on the entire contraints matrix A. - static const unsigned BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY; - - // When the row bound tightener is asked to run until saturation, it can enter an infinite loop - // due to tiny increments in bounds. This number limits the number of iterations it can perform. - static const unsigned ROW_BOUND_TIGHTENER_SATURATION_ITERATIONS; - - // If the cost function error exceeds this threshold, it is recomputed - static const double COST_FUNCTION_ERROR_THRESHOLD; - - // Random seed for generating simulation values. - static const unsigned SIMULATION_RANDOM_SEED; - - // Random seed for EstimateVolume procedure (PreimageApproximation). - static const unsigned VOLUME_ESTIMATION_RANDOM_SEED; - - // Number of iterations for EstimateVolume procedure (PreimageApproximation). - static const unsigned VOLUME_ESTIMATION_ITERATIONS; - - // Random seed for PreimageApproximation optimization. 
- static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED; - - // Maximum iterations for PreimageApproximation optimization. - static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; - - // Step size for PreimageApproximation optimization. - static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; - - // How often should projected steepest edge reset the reference space? - static const unsigned PSE_ITERATIONS_BEFORE_RESET; - - // An error threshold which, when crossed, causes projected steepest edge to reset the reference - // space - static const double PSE_GAMMA_ERROR_THRESHOLD; - - // PSE's Gamma function's update tolerance - static const double PSE_GAMMA_UPDATE_TOLERANCE; - - // The tolerance for checking whether f = Constraint( b ), Constraint \in { ReLU, ABS, Sign} - static const double CONSTRAINT_COMPARISON_TOLERANCE; - - // Toggle between two types of LSE lower bound for softmax - static const double SOFTMAX_LSE2_THRESHOLD; - - // Should the initial basis be comprised only of auxiliary (row) variables? - static const bool ONLY_AUX_INITIAL_BASIS; - - /* - Explicit (Reluplex-style) bound tightening options - */ - - enum ExplicitBasisBoundTighteningType { - // Compute the inverse basis matrix and use it - COMPUTE_INVERTED_BASIS_MATRIX = 0, - // Use the inverted basis matrix without using it, via transformations - USE_IMPLICIT_INVERTED_BASIS_MATRIX = 1, - // Disable explicit basis bound tightening - DISABLE_EXPLICIT_BASIS_TIGHTENING = 2, - }; - - // When doing bound tightening using the explicit basis matrix, should the basis matrix be - // inverted? - static const ExplicitBasisBoundTighteningType EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE; - static const double EXPLICIT_BASIS_BOUND_TIGHTENING_ROUNDING_CONSTANT; - - // When doing explicit bound tightening, should we repeat until saturation? 
- static const bool EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION; - - /* - Symbolic bound tightening options - */ - - // Symbolic tightening, LP rounding constants - static const double SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT; - static const double LP_TIGHTENING_ROUNDING_CONSTANT; - - static const double SIGMOID_CUTOFF_CONSTANT; - - /* - Constraint fixing heuristics - */ - - // When a PL constraint proposes a fix that affects multiple variables, should it first query - // for any relevant linear connections between the variables? - static const bool USE_SMART_FIX; - - // A heuristic for selecting which of the broken PL constraints will be repaired next. In this - // case, the one that has been repaired the least number of times so far. - static const bool USE_LEAST_FIX; - - /* - Basis factorization options - */ - - // The number of accumualted eta matrices, after which the basis will be refactorized - static const unsigned REFACTORIZATION_THRESHOLD; - - // The kind of basis factorization algorithm in use - enum BasisFactorizationType { - LU_FACTORIZATION, - SPARSE_LU_FACTORIZATION, - FORREST_TOMLIN_FACTORIZATION, - SPARSE_FORREST_TOMLIN_FACTORIZATION, - }; - static const BasisFactorizationType BASIS_FACTORIZATION_TYPE; - - /* In the polarity-based branching heuristics, only this many earliest nodes - are considered to branch on. - */ - static const unsigned POLARITY_CANDIDATES_THRESHOLD; - - /* The max number of DnC splits - */ - static const unsigned DNC_DEPTH_THRESHOLD; - - /* Minimal coefficient of a variable in a Tableau row, that is used for bound tightening - */ - static const double MINIMAL_COEFFICIENT_FOR_TIGHTENING; - - /* The tolerance of errors when checking lemmas in the proof-checking process - */ - static const double LEMMA_CERTIFICATION_TOLERANCE; - - /* Denote whether proofs should be written as a JSON file - */ - static const bool WRITE_JSON_PROOF; - - /* How many layers after the current layer do we encode in backward analysis. 
- */ - static const unsigned BACKWARD_BOUND_PROPAGATION_DEPTH; - - /* How many rounds of backward analysis to perform? - */ - static const unsigned MAX_ROUNDS_OF_BACKWARD_ANALYSIS; - -#ifdef ENABLE_GUROBI - /* - The number of threads Gurobi spawns - */ - static const unsigned GUROBI_NUMBER_OF_THREADS; - static const bool GUROBI_LOGGING; -#endif // ENABLE_GUROBI - - /* - Logging options - */ - static const bool DNC_MANAGER_LOGGING; - static const bool ENGINE_LOGGING; - static const bool TABLEAU_LOGGING; - static const bool SMT_CORE_LOGGING; - static const bool DANTZIGS_RULE_LOGGING; - static const bool BASIS_FACTORIZATION_LOGGING; - static const bool PREPROCESSOR_LOGGING; - static const bool INPUT_QUERY_LOGGING; - static const bool PROJECTED_STEEPEST_EDGE_LOGGING; - static const bool GAUSSIAN_ELIMINATION_LOGGING; - static const bool QUERY_LOADER_LOGGING; - static const bool SYMBOLIC_BOUND_TIGHTENER_LOGGING; - static const bool NETWORK_LEVEL_REASONER_LOGGING; - static const bool MPS_PARSER_LOGGING; - static const bool ONNX_PARSER_LOGGING; - static const bool SOI_LOGGING; - static const bool SCORE_TRACKER_LOGGING; - static const bool CEGAR_LOGGING; -}; - -#endif // __GlobalConfiguration_h__ - -// -// Local Variables: -// compile-command: "make -C .. " -// tags-file-name: "../TAGS" -// c-basic-offset: 4 -// End: -// From 81e28b7835a2a931dbd99ddeea1c5e141eabcb80 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 12 Dec 2024 20:57:48 +0200 Subject: [PATCH 32/72] Delete src/nlr/GlobalConfiguration.cpp --- src/nlr/GlobalConfiguration.cpp | 238 -------------------------------- 1 file changed, 238 deletions(-) delete mode 100644 src/nlr/GlobalConfiguration.cpp diff --git a/src/nlr/GlobalConfiguration.cpp b/src/nlr/GlobalConfiguration.cpp deleted file mode 100644 index 3a08b92234..0000000000 --- a/src/nlr/GlobalConfiguration.cpp +++ /dev/null @@ -1,238 +0,0 @@ -/********************* */ -/*! 
\file GlobalConfiguration.cpp - ** \verbatim - ** Top contributors (to current version): - ** Guy Katz, Parth Shah, Derek Huang - ** This file is part of the Marabou project. - ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS - ** in the top-level source directory) and their institutional affiliations. - ** All rights reserved. See the file COPYING in the top-level source - ** directory for licensing information.\endverbatim - ** - ** [[ Add lengthier description here ]] - - **/ - -#include "GlobalConfiguration.h" - -#include "DivideStrategy.h" -#include "MString.h" - -#include - - -// The exponential moving average is calculated as -// ema = current * alpha + previous * (1 - alpha) -const double GlobalConfiguration::EXPONENTIAL_MOVING_AVERAGE_ALPHA = 0.5; - -// Whether to use SoI instead of Reluplex for local search for satisfying assignments -// to non-linear constraint. -bool GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH = true; - -const double GlobalConfiguration::SCORE_BUMP_FOR_PL_CONSTRAINTS_NOT_IN_SOI = 5; - -// Use the polarity metrics to decide which branch to take first in a case split -// and how to repair a ReLU constraint. 
-const bool GlobalConfiguration::USE_POLARITY_BASED_DIRECTION_HEURISTICS = true; - -const double GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS = 0.0000000001; -const unsigned GlobalConfiguration::DEFAULT_DOUBLE_TO_STRING_PRECISION = 10; -const unsigned GlobalConfiguration::STATISTICS_PRINTING_FREQUENCY = 10000; -const unsigned GlobalConfiguration::STATISTICS_PRINTING_FREQUENCY_GUROBI = 100; -const double GlobalConfiguration::BOUND_COMPARISON_ADDITIVE_TOLERANCE = 0.0000001; -const double GlobalConfiguration::BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE = 0.001 * 0.0000001; -const double GlobalConfiguration::PIVOT_CHANGE_COLUMN_TOLERANCE = 0.000000001; -const double GlobalConfiguration::PIVOT_ROW_AND_COLUMN_TOLERANCE = 0.01; -const double GlobalConfiguration::ENTRY_ELIGIBILITY_TOLERANCE = 0.00000001; -const double GlobalConfiguration::RATIO_CONSTRAINT_ADDITIVE_TOLERANCE = 0.0000001 * 0.3; -const double GlobalConfiguration::RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE = - 0.001 * 0.0000001 * 0.3; -const double GlobalConfiguration::HARRIS_RATIO_CONSTRAINT_ADDITIVE_TOLERANCE = 0.0000001 * 0.5; -const double GlobalConfiguration::HARRIS_RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE = - 0.001 * 0.0000001 * 0.5; -const double GlobalConfiguration::BASIC_COSTS_ADDITIVE_TOLERANCE = 0.0000001; -const double GlobalConfiguration::BASIC_COSTS_MULTIPLICATIVE_TOLERANCE = 0.001 * 0.0000001; -const double GlobalConfiguration::SPARSE_FORREST_TOMLIN_DIAGONAL_ELEMENT_TOLERANCE = 0.00001; -const unsigned GlobalConfiguration::DEGRADATION_CHECKING_FREQUENCY = 100; -const double GlobalConfiguration::DEGRADATION_THRESHOLD = 0.1; -const double GlobalConfiguration::ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD = 0.0001; -const bool GlobalConfiguration::USE_COLUMN_MERGING_EQUATIONS = false; -const double GlobalConfiguration::GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD = 0.1; -const unsigned GlobalConfiguration::MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS = 5; -const DivideStrategy 
GlobalConfiguration::SPLITTING_HEURISTICS = DivideStrategy::ReLUViolation; -const unsigned GlobalConfiguration::INTERVAL_SPLITTING_FREQUENCY = 10; -const unsigned GlobalConfiguration::INTERVAL_SPLITTING_THRESHOLD = 10; -const unsigned GlobalConfiguration::BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY = 100; -const unsigned GlobalConfiguration::ROW_BOUND_TIGHTENER_SATURATION_ITERATIONS = 20; -const double GlobalConfiguration::COST_FUNCTION_ERROR_THRESHOLD = 0.0000000001; - -const unsigned GlobalConfiguration::SIMULATION_RANDOM_SEED = 1; -const unsigned GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED = 1; -const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED = 1; -const unsigned GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS = 1000; -const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS = 100; -const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE = 0.1; -const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE = 0.05; - -const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true; - -const double GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT = 0.00000000001; -const double GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT = 0.00000001; - -const double GlobalConfiguration::SIGMOID_CUTOFF_CONSTANT = 20; - -const bool GlobalConfiguration::PREPROCESS_INPUT_QUERY = true; -const bool GlobalConfiguration::PREPROCESSOR_ELIMINATE_VARIABLES = true; -const bool GlobalConfiguration::PL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING = true; -const bool GlobalConfiguration::NL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING = true; -const double GlobalConfiguration::PREPROCESSOR_ALMOST_FIXED_THRESHOLD = 0.00001; - -const unsigned GlobalConfiguration::PREPROCESSSING_MAX_TIGHTEING_ROUND = 10; - -const bool GlobalConfiguration::WARM_START = false; - -const unsigned GlobalConfiguration::MAX_ITERATIONS_WITHOUT_PROGRESS = 10000; - -const 
unsigned GlobalConfiguration::PSE_ITERATIONS_BEFORE_RESET = 1000; -const double GlobalConfiguration::PSE_GAMMA_ERROR_THRESHOLD = 0.001; -const double GlobalConfiguration::PSE_GAMMA_UPDATE_TOLERANCE = 0.000000001; - -const double GlobalConfiguration::CONSTRAINT_COMPARISON_TOLERANCE = 0.00001; - -const double GlobalConfiguration::SOFTMAX_LSE2_THRESHOLD = 0.6; - -const bool GlobalConfiguration::ONLY_AUX_INITIAL_BASIS = false; - -const GlobalConfiguration::ExplicitBasisBoundTighteningType - GlobalConfiguration::EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE = - GlobalConfiguration::COMPUTE_INVERTED_BASIS_MATRIX; -const bool GlobalConfiguration::EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION = false; -const double GlobalConfiguration::EXPLICIT_BASIS_BOUND_TIGHTENING_ROUNDING_CONSTANT = 1e-6; - -const unsigned GlobalConfiguration::REFACTORIZATION_THRESHOLD = 100; -const GlobalConfiguration::BasisFactorizationType GlobalConfiguration::BASIS_FACTORIZATION_TYPE = - GlobalConfiguration::SPARSE_FORREST_TOMLIN_FACTORIZATION; - -const unsigned GlobalConfiguration::POLARITY_CANDIDATES_THRESHOLD = 5; - -const unsigned GlobalConfiguration::DNC_DEPTH_THRESHOLD = 5; - -const double GlobalConfiguration::MINIMAL_COEFFICIENT_FOR_TIGHTENING = 0.01; -const double GlobalConfiguration::LEMMA_CERTIFICATION_TOLERANCE = 0.000001; -const bool GlobalConfiguration::WRITE_JSON_PROOF = false; - -const unsigned GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH = 3; -const unsigned GlobalConfiguration::MAX_ROUNDS_OF_BACKWARD_ANALYSIS = 10; - -#ifdef ENABLE_GUROBI -const unsigned GlobalConfiguration::GUROBI_NUMBER_OF_THREADS = 1; -const bool GlobalConfiguration::GUROBI_LOGGING = false; -#endif // ENABLE_GUROBI - -// Logging - note that it is enabled only in Debug mode -const bool GlobalConfiguration::DNC_MANAGER_LOGGING = false; -const bool GlobalConfiguration::ENGINE_LOGGING = false; -const bool GlobalConfiguration::TABLEAU_LOGGING = false; -const bool GlobalConfiguration::SMT_CORE_LOGGING = false; -const 
bool GlobalConfiguration::DANTZIGS_RULE_LOGGING = false; -const bool GlobalConfiguration::BASIS_FACTORIZATION_LOGGING = false; -const bool GlobalConfiguration::PREPROCESSOR_LOGGING = false; -const bool GlobalConfiguration::INPUT_QUERY_LOGGING = false; -const bool GlobalConfiguration::PROJECTED_STEEPEST_EDGE_LOGGING = false; -const bool GlobalConfiguration::GAUSSIAN_ELIMINATION_LOGGING = false; -const bool GlobalConfiguration::QUERY_LOADER_LOGGING = false; -const bool GlobalConfiguration::SYMBOLIC_BOUND_TIGHTENER_LOGGING = false; -const bool GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING = false; -const bool GlobalConfiguration::MPS_PARSER_LOGGING = false; -const bool GlobalConfiguration::ONNX_PARSER_LOGGING = false; -const bool GlobalConfiguration::SOI_LOGGING = false; -const bool GlobalConfiguration::SCORE_TRACKER_LOGGING = false; -const bool GlobalConfiguration::CEGAR_LOGGING = false; - -const bool GlobalConfiguration::USE_SMART_FIX = false; -const bool GlobalConfiguration::USE_LEAST_FIX = false; - -void GlobalConfiguration::print() -{ - printf( "****************************\n" ); - printf( "*** Global Configuraiton ***\n" ); - printf( "****************************\n" ); - printf( " DEFAULT_EPSILON_FOR_COMPARISONS: %.15lf\n", DEFAULT_EPSILON_FOR_COMPARISONS ); - printf( " DEFAULT_DOUBLE_TO_STRING_PRECISION: %u\n", DEFAULT_DOUBLE_TO_STRING_PRECISION ); - printf( " STATISTICS_PRINTING_FREQUENCY: %u\n", STATISTICS_PRINTING_FREQUENCY ); - printf( " BOUND_COMPARISON_ADDITIVE_TOLERANCE: %.15lf\n", - BOUND_COMPARISON_ADDITIVE_TOLERANCE ); - printf( " BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE: %.15lf\n", - BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE ); - printf( " PIVOT_CHANGE_COLUMN_TOLERANCE: %.15lf\n", PIVOT_CHANGE_COLUMN_TOLERANCE ); - printf( " RATIO_CONSTRAINT_ADDITIVE_TOLERANCE: %.15lf\n", - RATIO_CONSTRAINT_ADDITIVE_TOLERANCE ); - printf( " RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE: %.15lf\n", - RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE ); - printf( " 
BASIC_COSTS_ADDITIVE_TOLERANCE: %.15lf\n", BASIC_COSTS_ADDITIVE_TOLERANCE ); - printf( " BASIC_COSTS_MULTIPLICATIVE_TOLERANCE: %.15lf\n", - BASIC_COSTS_MULTIPLICATIVE_TOLERANCE ); - printf( " DEGRADATION_CHECKING_FREQUENCY: %u\n", DEGRADATION_CHECKING_FREQUENCY ); - printf( " DEGRADATION_THRESHOLD: %.15lf\n", DEGRADATION_THRESHOLD ); - printf( " ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD: %.15lf\n", ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD ); - printf( " USE_COLUMN_MERGING_EQUATIONS: %s\n", USE_COLUMN_MERGING_EQUATIONS ? "Yes" : "No" ); - printf( " GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD: %.15lf\n", - GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD ); - printf( " MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS: %u\n", MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS ); - printf( " BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY: %u\n", - BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY ); - printf( " COST_FUNCTION_ERROR_THRESHOLD: %.15lf\n", COST_FUNCTION_ERROR_THRESHOLD ); - printf( " USE_HARRIS_RATIO_TEST: %s\n", USE_HARRIS_RATIO_TEST ? "Yes" : "No" ); - - printf( " PREPROCESS_INPUT_QUERY: %s\n", PREPROCESS_INPUT_QUERY ? "Yes" : "No" ); - printf( " PREPROCESSOR_ELIMINATE_VARIABLES: %s\n", - PREPROCESSOR_ELIMINATE_VARIABLES ? 
"Yes" : "No" ); - printf( " PSE_ITERATIONS_BEFORE_RESET: %u\n", PSE_ITERATIONS_BEFORE_RESET ); - printf( " PSE_GAMMA_ERROR_THRESHOLD: %.15lf\n", PSE_GAMMA_ERROR_THRESHOLD ); - printf( " CONSTRAINT_COMPARISON_TOLERANCE: %.15lf\n", CONSTRAINT_COMPARISON_TOLERANCE ); - - String basisBoundTighteningType; - switch ( EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE ) - { - case COMPUTE_INVERTED_BASIS_MATRIX: - basisBoundTighteningType = "Compute inverted basis matrix"; - break; - - case USE_IMPLICIT_INVERTED_BASIS_MATRIX: - basisBoundTighteningType = "Use implicit inverted basis matrix"; - break; - - default: - basisBoundTighteningType = "Unknown"; - break; - } - - printf( " EXPLICIT_BASIS_BOUND_TIGHTENING_INVERT_BASIS: %s\n", - basisBoundTighteningType.ascii() ); - printf( " EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION: %s\n", - EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION ? "Yes" : "No" ); - printf( " REFACTORIZATION_THRESHOLD: %u\n", REFACTORIZATION_THRESHOLD ); - - String basisFactorizationType; - if ( GlobalConfiguration::BASIS_FACTORIZATION_TYPE == GlobalConfiguration::LU_FACTORIZATION ) - basisFactorizationType = "LU_FACTORIZATION"; - else if ( GlobalConfiguration::BASIS_FACTORIZATION_TYPE == - GlobalConfiguration::SPARSE_LU_FACTORIZATION ) - basisFactorizationType = "SPARSE_LU_FACTORIZATION"; - else if ( GlobalConfiguration::BASIS_FACTORIZATION_TYPE == - GlobalConfiguration::FORREST_TOMLIN_FACTORIZATION ) - basisFactorizationType = "FORREST_TOMLIN_FACTORIZATION"; - else - basisFactorizationType = "Unknown"; - - printf( " BASIS_FACTORIZATION_TYPE: %s\n", basisFactorizationType.ascii() ); - printf( "****************************\n" ); -} - -// -// Local Variables: -// compile-command: "make -C ../.. 
" -// tags-file-name: "../../TAGS" -// c-basic-offset: 4 -// End: -// From dea156a2e86159a90b57431f7d4ba114365ec379 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 12 Dec 2024 20:58:07 +0200 Subject: [PATCH 33/72] Delete src/nlr/Engine.cpp --- src/nlr/Engine.cpp | 3813 -------------------------------------------- 1 file changed, 3813 deletions(-) delete mode 100644 src/nlr/Engine.cpp diff --git a/src/nlr/Engine.cpp b/src/nlr/Engine.cpp deleted file mode 100644 index e137c3cbb9..0000000000 --- a/src/nlr/Engine.cpp +++ /dev/null @@ -1,3813 +0,0 @@ -/********************* */ -/*! \file Engine.cpp - ** \verbatim - ** Top contributors (to current version): - ** Guy Katz, Duligur Ibeling, Andrew Wu, Omri Isac - ** This file is part of the Marabou project. - ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS - ** in the top-level source directory) and their institutional affiliations. - ** All rights reserved. See the file COPYING in the top-level source - ** directory for licensing information.\endverbatim - ** - ** \brief [[ Add one-line brief description here ]] - ** - ** [[ Add lengthier description here ]] - **/ - -#include "Engine.h" - -#include "AutoConstraintMatrixAnalyzer.h" -#include "Debug.h" -#include "DisjunctionConstraint.h" -#include "EngineState.h" -#include "InfeasibleQueryException.h" -#include "MStringf.h" -#include "MalformedBasisException.h" -#include "MarabouError.h" -#include "NLRError.h" -#include "PiecewiseLinearConstraint.h" -#include "Preprocessor.h" -#include "Query.h" -#include "TableauRow.h" -#include "TimeUtils.h" -#include "VariableOutOfBoundDuringOptimizationException.h" -#include "Vector.h" - -#include - -Engine::Engine() - : _context() - , _boundManager( _context ) - , _tableau( _boundManager ) - , _preprocessedQuery( nullptr ) - , _rowBoundTightener( *_tableau ) - , _smtCore( this ) - , _numPlConstraintsDisabledByValidSplits( 0 ) - , _preprocessingEnabled( false ) - , _initialStateStored( false ) - , _work( NULL 
) - , _basisRestorationRequired( Engine::RESTORATION_NOT_NEEDED ) - , _basisRestorationPerformed( Engine::NO_RESTORATION_PERFORMED ) - , _costFunctionManager( _tableau ) - , _quitRequested( false ) - , _exitCode( Engine::NOT_DONE ) - , _numVisitedStatesAtPreviousRestoration( 0 ) - , _networkLevelReasoner( NULL ) - , _verbosity( Options::get()->getInt( Options::VERBOSITY ) ) - , _lastNumVisitedStates( 0 ) - , _lastIterationWithProgress( 0 ) - , _symbolicBoundTighteningType( Options::get()->getSymbolicBoundTighteningType() ) - , _solveWithMILP( Options::get()->getBool( Options::SOLVE_WITH_MILP ) ) - , _lpSolverType( Options::get()->getLPSolverType() ) - , _gurobi( nullptr ) - , _milpEncoder( nullptr ) - , _soiManager( nullptr ) - , _simulationSize( Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ) ) - , _isGurobyEnabled( Options::get()->gurobiEnabled() ) - , _performLpTighteningAfterSplit( - Options::get()->getBool( Options::PERFORM_LP_TIGHTENING_AFTER_SPLIT ) ) - , _milpSolverBoundTighteningType( Options::get()->getMILPSolverBoundTighteningType() ) - , _sncMode( false ) - , _queryId( "" ) - , _produceUNSATProofs( Options::get()->getBool( Options::PRODUCE_PROOFS ) ) - , _groundBoundManager( _context ) - , _UNSATCertificate( NULL ) -{ - _smtCore.setStatistics( &_statistics ); - _tableau->setStatistics( &_statistics ); - _rowBoundTightener->setStatistics( &_statistics ); - _preprocessor.setStatistics( &_statistics ); - - _activeEntryStrategy = _projectedSteepestEdgeRule; - _activeEntryStrategy->setStatistics( &_statistics ); - _statistics.stampStartingTime(); - setRandomSeed( Options::get()->getInt( Options::SEED ) ); - - _boundManager.registerEngine( this ); - _groundBoundManager.registerEngine( this ); - _statisticsPrintingFrequency = ( _lpSolverType == LPSolverType::NATIVE ) - ? GlobalConfiguration::STATISTICS_PRINTING_FREQUENCY - : GlobalConfiguration::STATISTICS_PRINTING_FREQUENCY_GUROBI; - - _UNSATCertificateCurrentPointer = - _produceUNSATProofs ? 
new ( true ) - CVC4::context::CDO( &_context, NULL ) - : NULL; -} - -Engine::~Engine() -{ - if ( _work ) - { - delete[] _work; - _work = NULL; - } - - if ( _UNSATCertificate ) - { - delete _UNSATCertificate; - _UNSATCertificate = NULL; - } - - if ( _produceUNSATProofs && _UNSATCertificateCurrentPointer ) - _UNSATCertificateCurrentPointer->deleteSelf(); -} - -void Engine::setVerbosity( unsigned verbosity ) -{ - _verbosity = verbosity; -} - -void Engine::adjustWorkMemorySize() -{ - if ( _work ) - { - delete[] _work; - _work = NULL; - } - - _work = new double[_tableau->getM()]; - if ( !_work ) - throw MarabouError( MarabouError::ALLOCATION_FAILED, "Engine::work" ); -} - -void Engine::applySnCSplit( PiecewiseLinearCaseSplit sncSplit, String queryId ) -{ - _sncMode = true; - _sncSplit = sncSplit; - _queryId = queryId; - preContextPushHook(); - _smtCore.pushContext(); - applySplit( sncSplit ); - _boundManager.propagateTightenings(); -} - -bool Engine::inSnCMode() const -{ - return _sncMode; -} - -void Engine::setRandomSeed( unsigned seed ) -{ - srand( seed ); -} - -Query Engine::prepareSnCQuery() -{ - List bounds = _sncSplit.getBoundTightenings(); - List equations = _sncSplit.getEquations(); - - Query sncIPQ = *_preprocessedQuery; - for ( auto &equation : equations ) - sncIPQ.addEquation( equation ); - - for ( auto &bound : bounds ) - { - switch ( bound._type ) - { - case Tightening::LB: - sncIPQ.setLowerBound( bound._variable, bound._value ); - break; - - case Tightening::UB: - sncIPQ.setUpperBound( bound._variable, bound._value ); - } - } - - return sncIPQ; -} - -void Engine::exportQueryWithError( String errorMessage ) -{ - String ipqFileName = ( _queryId.length() > 0 ) ? _queryId + ".ipq" : "failedInputQuery.ipq"; - prepareSnCQuery().saveQuery( ipqFileName ); - printf( "Engine: %s!\nInput query has been saved as %s. 
Please attach the input query when you " - "open the issue on GitHub.\n", - errorMessage.ascii(), - ipqFileName.ascii() ); -} - -bool Engine::solve( double timeoutInSeconds ) -{ - SignalHandler::getInstance()->initialize(); - SignalHandler::getInstance()->registerClient( this ); - - // Register the boundManager with all the PL constraints - for ( auto &plConstraint : _plConstraints ) - plConstraint->registerBoundManager( &_boundManager ); - for ( auto &nlConstraint : _nlConstraints ) - nlConstraint->registerBoundManager( &_boundManager ); - - // Before encoding, make sure all valid constraints are applied. - applyAllValidConstraintCaseSplits(); - - if ( _solveWithMILP ) - return solveWithMILPEncoding( timeoutInSeconds ); - - updateDirections(); - if ( _lpSolverType == LPSolverType::NATIVE ) - storeInitialEngineState(); - else if ( _lpSolverType == LPSolverType::GUROBI ) - { - ENGINE_LOG( "Encoding convex relaxation into Gurobi..." ); - _gurobi = std::unique_ptr( new GurobiWrapper() ); - _tableau->setGurobi( &( *_gurobi ) ); - _milpEncoder = std::unique_ptr( new MILPEncoder( *_tableau ) ); - _milpEncoder->setStatistics( &_statistics ); - _milpEncoder->encodeQuery( *_gurobi, *_preprocessedQuery, true ); - ENGINE_LOG( "Encoding convex relaxation into Gurobi - done" ); - } - - mainLoopStatistics(); - if ( _verbosity > 0 ) - { - printf( "\nEngine::solve: Initial statistics\n" ); - _statistics.print(); - printf( "\n---\n" ); - } - - bool splitJustPerformed = true; - struct timespec mainLoopStart = TimeUtils::sampleMicro(); - while ( true ) - { - struct timespec mainLoopEnd = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TIME_MAIN_LOOP_MICRO, - TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); - mainLoopStart = mainLoopEnd; - - if ( shouldExitDueToTimeout( timeoutInSeconds ) ) - { - if ( _verbosity > 0 ) - { - printf( "\n\nEngine: quitting due to timeout...\n\n" ); - printf( "Final statistics:\n" ); - _statistics.print(); - } - - _exitCode = 
Engine::TIMEOUT; - _statistics.timeout(); - return false; - } - - if ( _quitRequested ) - { - if ( _verbosity > 0 ) - { - printf( "\n\nEngine: quitting due to external request...\n\n" ); - printf( "Final statistics:\n" ); - _statistics.print(); - } - - _exitCode = Engine::QUIT_REQUESTED; - return false; - } - - try - { - DEBUG( _tableau->verifyInvariants() ); - - mainLoopStatistics(); - if ( _verbosity > 1 && - _statistics.getLongAttribute( Statistics::NUM_MAIN_LOOP_ITERATIONS ) % - _statisticsPrintingFrequency == - 0 ) - _statistics.print(); - - if ( _lpSolverType == LPSolverType::NATIVE ) - { - checkOverallProgress(); - // Check whether progress has been made recently - - if ( performPrecisionRestorationIfNeeded() ) - continue; - - if ( _tableau->basisMatrixAvailable() ) - { - explicitBasisBoundTightening(); - _boundManager.propagateTightenings(); - applyAllValidConstraintCaseSplits(); - } - } - - // If true, we just entered a new subproblem - if ( splitJustPerformed ) - { - performBoundTighteningAfterCaseSplit(); - informLPSolverOfBounds(); - splitJustPerformed = false; - } - - // Perform any SmtCore-initiated case splits - if ( _smtCore.needToSplit() ) - { - _smtCore.performSplit(); - splitJustPerformed = true; - continue; - } - - if ( !_tableau->allBoundsValid() ) - { - // Some variable bounds are invalid, so the query is unsat - throw InfeasibleQueryException(); - } - - if ( allVarsWithinBounds() ) - { - // It's possible that a disjunction constraint is fixed and additional constraints - // are introduced, making the linear portion unsatisfied. So we need to make sure - // there are no valid case splits that we do not know of. - applyAllBoundTightenings(); - if ( applyAllValidConstraintCaseSplits() ) - continue; - - // The linear portion of the problem has been solved. 
- // Check the status of the PL constraints - bool solutionFound = adjustAssignmentToSatisfyNonLinearConstraints(); - if ( solutionFound ) - { - if ( allNonlinearConstraintsHold() ) - { - mainLoopEnd = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( - Statistics::TIME_MAIN_LOOP_MICRO, - TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); - if ( _verbosity > 0 ) - { - printf( "\nEngine::solve: sat assignment found\n" ); - _statistics.print(); - } - - // Allows checking proofs produced for UNSAT leaves of satisfiable query - // search tree - if ( _produceUNSATProofs ) - { - ASSERT( _UNSATCertificateCurrentPointer ); - ( **_UNSATCertificateCurrentPointer ).setSATSolutionFlag(); - } - _exitCode = Engine::SAT; - return true; - } - else if ( !hasBranchingCandidate() ) - { - mainLoopEnd = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( - Statistics::TIME_MAIN_LOOP_MICRO, - TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); - if ( _verbosity > 0 ) - { - printf( "\nEngine::solve: at leaf node but solving inconclusive\n" ); - _statistics.print(); - } - _exitCode = Engine::UNKNOWN; - return false; - } - else - { - while ( !_smtCore.needToSplit() ) - _smtCore.reportRejectedPhasePatternProposal(); - continue; - } - } - else - { - continue; - } - } - - // We have out-of-bounds variables. - if ( _lpSolverType == LPSolverType::NATIVE ) - performSimplexStep(); - else - { - ENGINE_LOG( "Checking LP feasibility with Gurobi..." 
); - DEBUG( { checkGurobiBoundConsistency(); } ); - ASSERT( _lpSolverType == LPSolverType::GUROBI ); - LinearExpression dontCare; - minimizeCostWithGurobi( dontCare ); - } - continue; - } - catch ( const MalformedBasisException & ) - { - _tableau->toggleOptimization( false ); - if ( !handleMalformedBasisException() ) - { - ASSERT( _lpSolverType == LPSolverType::NATIVE ); - _exitCode = Engine::ERROR; - exportQueryWithError( "Cannot restore tableau" ); - mainLoopEnd = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TIME_MAIN_LOOP_MICRO, - TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); - return false; - } - } - catch ( const InfeasibleQueryException & ) - { - _tableau->toggleOptimization( false ); - // The current query is unsat, and we need to pop. - // If we're at level 0, the whole query is unsat. - if ( _produceUNSATProofs ) - explainSimplexFailure(); - - if ( !_smtCore.popSplit() ) - { - mainLoopEnd = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TIME_MAIN_LOOP_MICRO, - TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); - if ( _verbosity > 0 ) - { - printf( "\nEngine::solve: unsat query\n" ); - _statistics.print(); - } - _exitCode = Engine::UNSAT; - return false; - } - else - { - splitJustPerformed = true; - } - } - catch ( const VariableOutOfBoundDuringOptimizationException & ) - { - _tableau->toggleOptimization( false ); - continue; - } - catch ( MarabouError &e ) - { - String message = Stringf( - "Caught a MarabouError. Code: %u. Message: %s ", e.getCode(), e.getUserMessage() ); - _exitCode = Engine::ERROR; - exportQueryWithError( message ); - mainLoopEnd = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TIME_MAIN_LOOP_MICRO, - TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); - return false; - } - catch ( ... 
) - { - _exitCode = Engine::ERROR; - exportQueryWithError( "Unknown error" ); - mainLoopEnd = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TIME_MAIN_LOOP_MICRO, - TimeUtils::timePassed( mainLoopStart, mainLoopEnd ) ); - return false; - } - } -} - -void Engine::mainLoopStatistics() -{ - struct timespec start = TimeUtils::sampleMicro(); - - unsigned activeConstraints = 0; - for ( const auto &constraint : _plConstraints ) - if ( constraint->isActive() ) - ++activeConstraints; - - _statistics.setUnsignedAttribute( Statistics::NUM_ACTIVE_PL_CONSTRAINTS, activeConstraints ); - _statistics.setUnsignedAttribute( Statistics::NUM_PL_VALID_SPLITS, - _numPlConstraintsDisabledByValidSplits ); - _statistics.setUnsignedAttribute( Statistics::NUM_PL_SMT_ORIGINATED_SPLITS, - _plConstraints.size() - activeConstraints - - _numPlConstraintsDisabledByValidSplits ); - - _statistics.incLongAttribute( Statistics::NUM_MAIN_LOOP_ITERATIONS ); - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TOTAL_TIME_HANDLING_STATISTICS_MICRO, - TimeUtils::timePassed( start, end ) ); -} - -void Engine::performBoundTighteningAfterCaseSplit() -{ - // Tighten bounds of a first hidden layer with MILP solver - performMILPSolverBoundedTighteningForSingleLayer( 1 ); - do - { - performSymbolicBoundTightening(); - } - while ( applyAllValidConstraintCaseSplits() ); - - // Tighten bounds of an output layer with MILP solver - if ( _networkLevelReasoner ) // to avoid failing of system test. - performMILPSolverBoundedTighteningForSingleLayer( - _networkLevelReasoner->getLayerIndexToLayer().size() - 1 ); -} - -bool Engine::adjustAssignmentToSatisfyNonLinearConstraints() -{ - ENGINE_LOG( "Linear constraints satisfied. Now trying to satisfy non-linear" - " constraints..." 
); - collectViolatedPlConstraints(); - - // If all constraints are satisfied, we are possibly done - if ( allPlConstraintsHold() ) - { - if ( _lpSolverType == LPSolverType::NATIVE && - _tableau->getBasicAssignmentStatus() != ITableau::BASIC_ASSIGNMENT_JUST_COMPUTED ) - { - if ( _verbosity > 0 ) - { - printf( "Before declaring sat, recomputing...\n" ); - } - // Make sure that the assignment is precise before declaring success - _tableau->computeAssignment(); - // If we actually have a real satisfying assignment, - return false; - } - else - return true; - } - else if ( !GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH ) - { - // We have violated piecewise-linear constraints. - performConstraintFixingStep(); - - // Finally, take this opporunity to tighten any bounds - // and perform any valid case splits. - tightenBoundsOnConstraintMatrix(); - _boundManager.propagateTightenings(); - // For debugging purposes - checkBoundCompliancyWithDebugSolution(); - - while ( applyAllValidConstraintCaseSplits() ) - performSymbolicBoundTightening(); - return false; - } - else - { - return performDeepSoILocalSearch(); - } -} - -bool Engine::performPrecisionRestorationIfNeeded() -{ - // If the basis has become malformed, we need to restore it - if ( basisRestorationNeeded() ) - { - if ( _basisRestorationRequired == Engine::STRONG_RESTORATION_NEEDED ) - { - performPrecisionRestoration( PrecisionRestorer::RESTORE_BASICS ); - _basisRestorationPerformed = Engine::PERFORMED_STRONG_RESTORATION; - } - else - { - performPrecisionRestoration( PrecisionRestorer::DO_NOT_RESTORE_BASICS ); - _basisRestorationPerformed = Engine::PERFORMED_WEAK_RESTORATION; - } - - _numVisitedStatesAtPreviousRestoration = - _statistics.getUnsignedAttribute( Statistics::NUM_VISITED_TREE_STATES ); - _basisRestorationRequired = Engine::RESTORATION_NOT_NEEDED; - return true; - } - - // Restoration is not required - _basisRestorationPerformed = Engine::NO_RESTORATION_PERFORMED; - - // Possible restoration due to preceision 
degradation - if ( shouldCheckDegradation() && highDegradation() ) - { - performPrecisionRestoration( PrecisionRestorer::RESTORE_BASICS ); - return true; - } - - return false; -} - -bool Engine::handleMalformedBasisException() -{ - // Debug - printf( "MalformedBasisException caught!\n" ); - // - - if ( _basisRestorationPerformed == Engine::NO_RESTORATION_PERFORMED ) - { - if ( _numVisitedStatesAtPreviousRestoration != - _statistics.getUnsignedAttribute( Statistics::NUM_VISITED_TREE_STATES ) ) - { - // We've tried a strong restoration before, and it didn't work. Do a weak restoration - _basisRestorationRequired = Engine::WEAK_RESTORATION_NEEDED; - } - else - { - _basisRestorationRequired = Engine::STRONG_RESTORATION_NEEDED; - } - return true; - } - else if ( _basisRestorationPerformed == Engine::PERFORMED_STRONG_RESTORATION ) - { - _basisRestorationRequired = Engine::WEAK_RESTORATION_NEEDED; - return true; - } - else - return false; -} - -void Engine::performConstraintFixingStep() -{ - // Statistics - _statistics.incLongAttribute( Statistics::NUM_CONSTRAINT_FIXING_STEPS ); - struct timespec start = TimeUtils::sampleMicro(); - - // Select a violated constraint as the target - selectViolatedPlConstraint(); - - // Report the violated constraint to the SMT engine - reportPlViolation(); - - // Attempt to fix the constraint - fixViolatedPlConstraintIfPossible(); - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TIME_CONSTRAINT_FIXING_STEPS_MICRO, - TimeUtils::timePassed( start, end ) ); -} - -bool Engine::performSimplexStep() -{ - // Statistics - _statistics.incLongAttribute( Statistics::NUM_SIMPLEX_STEPS ); - struct timespec start = TimeUtils::sampleMicro(); - - /* - In order to increase numerical stability, we attempt to pick a - "good" entering/leaving combination, by trying to avoid tiny pivot - values. We do this as follows: - - 1. Pick an entering variable according to the strategy in use. - 2. 
Find the entailed leaving variable. - 3. If the combination is bad, go back to (1) and find the - next-best entering variable. - */ - - if ( _tableau->isOptimizing() ) - _costFunctionManager->computeGivenCostFunction( _heuristicCost._addends ); - if ( _costFunctionManager->costFunctionInvalid() ) - _costFunctionManager->computeCoreCostFunction(); - else - _costFunctionManager->adjustBasicCostAccuracy(); - - DEBUG( { - // Since we're performing a simplex step, there are out-of-bounds variables. - // Therefore, if the cost function is fresh, it should not be zero. - if ( _costFunctionManager->costFunctionJustComputed() ) - { - const double *costFunction = _costFunctionManager->getCostFunction(); - unsigned size = _tableau->getN() - _tableau->getM(); - bool found = false; - for ( unsigned i = 0; i < size; ++i ) - { - if ( !FloatUtils::isZero( costFunction[i] ) ) - { - found = true; - break; - } - } - - if ( !found ) - { - printf( "Error! Have OOB vars but cost function is zero.\n" - "Recomputing cost function. New one is:\n" ); - _costFunctionManager->computeCoreCostFunction(); - _costFunctionManager->dumpCostFunction(); - throw MarabouError( MarabouError::DEBUGGING_ERROR, - "Have OOB vars but cost function is zero" ); - } - } - } ); - - // Obtain all eligible entering variables - List enteringVariableCandidates; - _tableau->getEntryCandidates( enteringVariableCandidates ); - - unsigned bestLeaving = 0; - double bestChangeRatio = 0.0; - Set excludedEnteringVariables; - bool haveCandidate = false; - unsigned bestEntering = 0; - double bestPivotEntry = 0.0; - unsigned tries = GlobalConfiguration::MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS; - - while ( tries > 0 ) - { - --tries; - - // Attempt to pick the best entering variable from the available candidates - if ( !_activeEntryStrategy->select( - _tableau, enteringVariableCandidates, excludedEnteringVariables ) ) - { - // No additional candidates can be found. - break; - } - - // We have a candidate! 
- haveCandidate = true; - - // We don't want to re-consider this candidate in future - // iterations - excludedEnteringVariables.insert( _tableau->getEnteringVariableIndex() ); - - // Pick a leaving variable - _tableau->computeChangeColumn(); - _tableau->pickLeavingVariable(); - - // A fake pivot always wins - if ( _tableau->performingFakePivot() ) - { - bestEntering = _tableau->getEnteringVariableIndex(); - bestLeaving = _tableau->getLeavingVariableIndex(); - bestChangeRatio = _tableau->getChangeRatio(); - memcpy( _work, _tableau->getChangeColumn(), sizeof( double ) * _tableau->getM() ); - break; - } - - // Is the newly found pivot better than the stored one? - unsigned leavingIndex = _tableau->getLeavingVariableIndex(); - double pivotEntry = FloatUtils::abs( _tableau->getChangeColumn()[leavingIndex] ); - if ( pivotEntry > bestPivotEntry ) - { - bestEntering = _tableau->getEnteringVariableIndex(); - bestPivotEntry = pivotEntry; - bestLeaving = leavingIndex; - bestChangeRatio = _tableau->getChangeRatio(); - memcpy( _work, _tableau->getChangeColumn(), sizeof( double ) * _tableau->getM() ); - } - - // If the pivot is greater than the sought-after threshold, we - // are done. - if ( bestPivotEntry >= GlobalConfiguration::ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD ) - break; - else - _statistics.incLongAttribute( - Statistics::NUM_SIMPLEX_PIVOT_SELECTIONS_IGNORED_FOR_STABILITY ); - } - - // If we don't have any candidates, this simplex step has failed. - if ( !haveCandidate ) - { - if ( _tableau->getBasicAssignmentStatus() != ITableau::BASIC_ASSIGNMENT_JUST_COMPUTED ) - { - // This failure might have resulted from a corrupt basic assignment. 
- _tableau->computeAssignment(); - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TIME_SIMPLEX_STEPS_MICRO, - TimeUtils::timePassed( start, end ) ); - return false; - } - else if ( !_costFunctionManager->costFunctionJustComputed() ) - { - // This failure might have resulted from a corrupt cost function. - ASSERT( _costFunctionManager->getCostFunctionStatus() == - ICostFunctionManager::COST_FUNCTION_UPDATED ); - _costFunctionManager->invalidateCostFunction(); - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TIME_SIMPLEX_STEPS_MICRO, - TimeUtils::timePassed( start, end ) ); - return false; - } - else - { - // Cost function is fresh --- failure is real. - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TIME_SIMPLEX_STEPS_MICRO, - TimeUtils::timePassed( start, end ) ); - if ( _tableau->isOptimizing() ) - { - // The current solution is optimal. - return true; - } - else - throw InfeasibleQueryException(); - } - } - - // Set the best choice in the tableau - _tableau->setEnteringVariableIndex( bestEntering ); - _tableau->setLeavingVariableIndex( bestLeaving ); - _tableau->setChangeColumn( _work ); - _tableau->setChangeRatio( bestChangeRatio ); - - bool fakePivot = _tableau->performingFakePivot(); - - if ( !fakePivot && bestPivotEntry < GlobalConfiguration::ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD ) - { - /* - Despite our efforts, we are stuck with a small pivot. 
If basis factorization - isn't fresh, refresh it and terminate this step - perhaps in the next iteration - a better pivot will be found - */ - if ( !_tableau->basisMatrixAvailable() ) - { - _tableau->refreshBasisFactorization(); - return false; - } - - _statistics.incLongAttribute( Statistics::NUM_SIMPLEX_UNSTABLE_PIVOTS ); - } - - if ( !fakePivot ) - { - _tableau->computePivotRow(); - _rowBoundTightener->examinePivotRow(); - } - - // Perform the actual pivot - _activeEntryStrategy->prePivotHook( _tableau, fakePivot ); - _tableau->performPivot(); - _activeEntryStrategy->postPivotHook( _tableau, fakePivot ); - _boundManager.propagateTightenings(); - _costFunctionManager->invalidateCostFunction(); - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TIME_SIMPLEX_STEPS_MICRO, - TimeUtils::timePassed( start, end ) ); - return false; -} - -void Engine::fixViolatedPlConstraintIfPossible() -{ - List fixes; - - if ( GlobalConfiguration::USE_SMART_FIX ) - fixes = _plConstraintToFix->getSmartFixes( _tableau ); - else - fixes = _plConstraintToFix->getPossibleFixes(); - - // First, see if we can fix without pivoting. We are looking for a fix concerning a - // non-basic variable, that doesn't set that variable out-of-bounds. - for ( const auto &fix : fixes ) - { - if ( !_tableau->isBasic( fix._variable ) ) - { - if ( _tableau->checkValueWithinBounds( fix._variable, fix._value ) ) - { - _tableau->setNonBasicAssignment( fix._variable, fix._value, true ); - return; - } - } - } - - // No choice, have to pivot. Look for a fix concerning a basic variable, that - // doesn't set that variable out-of-bounds. If smart-fix is enabled and implemented, - // we should probably not reach this point. 
- bool found = false; - auto it = fixes.begin(); - while ( !found && it != fixes.end() ) - { - if ( _tableau->isBasic( it->_variable ) ) - { - if ( _tableau->checkValueWithinBounds( it->_variable, it->_value ) ) - { - found = true; - } - } - if ( !found ) - { - ++it; - } - } - - // If we couldn't find an eligible fix, give up - if ( !found ) - return; - - PiecewiseLinearConstraint::Fix fix = *it; - ASSERT( _tableau->isBasic( fix._variable ) ); - - TableauRow row( _tableau->getN() - _tableau->getM() ); - _tableau->getTableauRow( _tableau->variableToIndex( fix._variable ), &row ); - - // Pick the variable with the largest coefficient in this row for pivoting, - // to increase numerical stability. - unsigned bestCandidate = row._row[0]._var; - double bestValue = FloatUtils::abs( row._row[0]._coefficient ); - - unsigned n = _tableau->getN(); - unsigned m = _tableau->getM(); - for ( unsigned i = 1; i < n - m; ++i ) - { - double contenderValue = FloatUtils::abs( row._row[i]._coefficient ); - if ( FloatUtils::gt( contenderValue, bestValue ) ) - { - bestValue = contenderValue; - bestCandidate = row._row[i]._var; - } - } - - if ( FloatUtils::isZero( bestValue ) ) - { - // This can happen, e.g., if we have an equation x = 5, and is legal behavior. - return; - } - - // Switch between nonBasic and the variable we need to fix - _tableau->setEnteringVariableIndex( _tableau->variableToIndex( bestCandidate ) ); - _tableau->setLeavingVariableIndex( _tableau->variableToIndex( fix._variable ) ); - - // Make sure the change column and pivot row are up-to-date - strategies - // such as projected steepest edge need these for their internal updates. 
- _tableau->computeChangeColumn(); - _tableau->computePivotRow(); - - _activeEntryStrategy->prePivotHook( _tableau, false ); - _tableau->performDegeneratePivot(); - _activeEntryStrategy->postPivotHook( _tableau, false ); - - ASSERT( !_tableau->isBasic( fix._variable ) ); - _tableau->setNonBasicAssignment( fix._variable, fix._value, true ); -} - -bool Engine::processInputQuery( const IQuery &inputQuery ) -{ - return processInputQuery( inputQuery, GlobalConfiguration::PREPROCESS_INPUT_QUERY ); -} - -bool Engine::calculateBounds( const IQuery &inputQuery ) -{ - ENGINE_LOG( "calculateBounds starting\n" ); - struct timespec start = TimeUtils::sampleMicro(); - - try - { - invokePreprocessor( inputQuery, true ); - if ( _verbosity > 1 ) - printInputBounds( inputQuery ); - - initializeNetworkLevelReasoning(); - - performSymbolicBoundTightening( &( *_preprocessedQuery ) ); - performSimulation(); - performMILPSolverBoundedTightening( &( *_preprocessedQuery ) ); - performAdditionalBackwardAnalysisIfNeeded(); - - if ( _networkLevelReasoner && Options::get()->getBool( Options::DUMP_BOUNDS ) ) - _networkLevelReasoner->dumpBounds(); - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.setLongAttribute( Statistics::CALCULATE_BOUNDS_TIME_MICRO, - TimeUtils::timePassed( start, end ) ); - if ( !_tableau->allBoundsValid() ) - { - // Some variable bounds are invalid, so the query is unsat - throw InfeasibleQueryException(); - } - } - catch ( const InfeasibleQueryException & ) - { - ENGINE_LOG( "calculateBounds done\n" ); - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.setLongAttribute( Statistics::CALCULATE_BOUNDS_TIME_MICRO, - TimeUtils::timePassed( start, end ) ); - - _exitCode = Engine::UNSAT; - printf( "unsat\n" ); - - return false; - } - - ENGINE_LOG( "calculateBounds done\n" ); - - return true; -} - -void Engine::invokePreprocessor( const IQuery &inputQuery, bool preprocess ) -{ - if ( _verbosity > 0 ) - printf( "Engine::processInputQuery: Input 
query (before preprocessing): " - "%u equations, %u variables\n", - inputQuery.getNumberOfEquations(), - inputQuery.getNumberOfVariables() ); - - // If processing is enabled, invoke the preprocessor - _preprocessingEnabled = preprocess; - if ( _preprocessingEnabled ) - _preprocessedQuery = _preprocessor.preprocess( - inputQuery, GlobalConfiguration::PREPROCESSOR_ELIMINATE_VARIABLES ); - else - { - _preprocessedQuery = std::unique_ptr( inputQuery.generateQuery() ); - Preprocessor().informConstraintsOfInitialBounds( *_preprocessedQuery ); - } - - if ( _verbosity > 0 ) - printf( "Engine::processInputQuery: Input query (after preprocessing): " - "%u equations, %u variables\n\n", - _preprocessedQuery->getNumberOfEquations(), - _preprocessedQuery->getNumberOfVariables() ); - - unsigned infiniteBounds = _preprocessedQuery->countInfiniteBounds(); - if ( infiniteBounds != 0 ) - { - _exitCode = Engine::ERROR; - throw MarabouError( MarabouError::UNBOUNDED_VARIABLES_NOT_YET_SUPPORTED, - Stringf( "Error! Have %u infinite bounds", infiniteBounds ).ascii() ); - } -} - -void Engine::printInputBounds( const IQuery &inputQuery ) const -{ - printf( "Input bounds:\n" ); - for ( unsigned i = 0; i < inputQuery.getNumInputVariables(); ++i ) - { - unsigned variable = inputQuery.inputVariableByIndex( i ); - double lb, ub; - bool fixed = false; - if ( _preprocessingEnabled ) - { - // Fixed variables are easy: return the value they've been fixed to. - if ( _preprocessor.variableIsFixed( variable ) ) - { - fixed = true; - lb = _preprocessor.getFixedValue( variable ); - ub = lb; - } - else - { - // Has the variable been merged into another? 
- while ( _preprocessor.variableIsMerged( variable ) ) - variable = _preprocessor.getMergedIndex( variable ); - - // We know which variable to look for, but it may have been assigned - // a new index, due to variable elimination - variable = _preprocessor.getNewIndex( variable ); - - lb = _preprocessedQuery->getLowerBound( variable ); - ub = _preprocessedQuery->getUpperBound( variable ); - } - } - else - { - lb = inputQuery.getLowerBound( variable ); - ub = inputQuery.getUpperBound( variable ); - } - - printf( "\tx%u: [%8.4lf, %8.4lf] %s\n", i, lb, ub, fixed ? "[FIXED]" : "" ); - } - printf( "\n" ); -} - -void Engine::storeEquationsInDegradationChecker() -{ - _degradationChecker.storeEquations( *_preprocessedQuery ); -} - -double *Engine::createConstraintMatrix() -{ - const List &equations( _preprocessedQuery->getEquations() ); - unsigned m = equations.size(); - unsigned n = _preprocessedQuery->getNumberOfVariables(); - - // Step 1: create a constraint matrix from the equations - double *constraintMatrix = new double[n * m]; - if ( !constraintMatrix ) - throw MarabouError( MarabouError::ALLOCATION_FAILED, "Engine::constraintMatrix" ); - std::fill_n( constraintMatrix, n * m, 0.0 ); - - unsigned equationIndex = 0; - for ( const auto &equation : equations ) - { - if ( equation._type != Equation::EQ ) - { - _exitCode = Engine::ERROR; - throw MarabouError( MarabouError::NON_EQUALITY_INPUT_EQUATION_DISCOVERED ); - } - - for ( const auto &addend : equation._addends ) - constraintMatrix[equationIndex * n + addend._variable] = addend._coefficient; - - ++equationIndex; - } - - return constraintMatrix; -} - -void Engine::removeRedundantEquations( const double *constraintMatrix ) -{ - const List &equations( _preprocessedQuery->getEquations() ); - unsigned m = equations.size(); - unsigned n = _preprocessedQuery->getNumberOfVariables(); - - // Step 1: analyze the matrix to identify redundant rows - AutoConstraintMatrixAnalyzer analyzer; - analyzer->analyze( constraintMatrix, m, 
n ); - - ENGINE_LOG( - Stringf( "Number of redundant rows: %u out of %u", analyzer->getRedundantRows().size(), m ) - .ascii() ); - - // Step 2: remove any equations corresponding to redundant rows - Set redundantRows = analyzer->getRedundantRows(); - - if ( !redundantRows.empty() ) - { - _preprocessedQuery->removeEquationsByIndex( redundantRows ); - m = equations.size(); - } -} - -void Engine::selectInitialVariablesForBasis( const double *constraintMatrix, - List &initialBasis, - List &basicRows ) -{ - /* - This method permutes rows and columns in the constraint matrix (prior - to the addition of auxiliary variables), in order to obtain a set of - column that constitue a lower triangular matrix. The variables - corresponding to the columns of this matrix join the initial basis. - - (It is possible that not enough variables are obtained this way, in which - case the initial basis will have to be augmented later). - */ - - const List &equations( _preprocessedQuery->getEquations() ); - - unsigned m = equations.size(); - unsigned n = _preprocessedQuery->getNumberOfVariables(); - - // Trivial case, or if a trivial basis is requested - if ( ( m == 0 ) || ( n == 0 ) || GlobalConfiguration::ONLY_AUX_INITIAL_BASIS ) - { - for ( unsigned i = 0; i < m; ++i ) - basicRows.append( i ); - - return; - } - - unsigned *nnzInRow = new unsigned[m]; - unsigned *nnzInColumn = new unsigned[n]; - - std::fill_n( nnzInRow, m, 0 ); - std::fill_n( nnzInColumn, n, 0 ); - - unsigned *columnOrdering = new unsigned[n]; - unsigned *rowOrdering = new unsigned[m]; - - for ( unsigned i = 0; i < m; ++i ) - rowOrdering[i] = i; - - for ( unsigned i = 0; i < n; ++i ) - columnOrdering[i] = i; - - // Initialize the counters - for ( unsigned i = 0; i < m; ++i ) - { - for ( unsigned j = 0; j < n; ++j ) - { - if ( !FloatUtils::isZero( constraintMatrix[i * n + j] ) ) - { - ++nnzInRow[i]; - ++nnzInColumn[j]; - } - } - } - - DEBUG( { - for ( unsigned i = 0; i < m; ++i ) - { - ASSERT( nnzInRow[i] > 0 ); - } - } 
); - - unsigned numExcluded = 0; - unsigned numTriangularRows = 0; - unsigned temp; - - while ( numExcluded + numTriangularRows < n ) - { - // Do we have a singleton row? - unsigned singletonRow = m; - for ( unsigned i = numTriangularRows; i < m; ++i ) - { - if ( nnzInRow[i] == 1 ) - { - singletonRow = i; - break; - } - } - - if ( singletonRow < m ) - { - // Have a singleton row! Swap it to the top and update counters - temp = rowOrdering[singletonRow]; - rowOrdering[singletonRow] = rowOrdering[numTriangularRows]; - rowOrdering[numTriangularRows] = temp; - - temp = nnzInRow[numTriangularRows]; - nnzInRow[numTriangularRows] = nnzInRow[singletonRow]; - nnzInRow[singletonRow] = temp; - - // Find the non-zero entry in the row and swap it to the diagonal - DEBUG( bool foundNonZero = false ); - for ( unsigned i = numTriangularRows; i < n - numExcluded; ++i ) - { - if ( !FloatUtils::isZero( constraintMatrix[rowOrdering[numTriangularRows] * n + - columnOrdering[i]] ) ) - { - temp = columnOrdering[i]; - columnOrdering[i] = columnOrdering[numTriangularRows]; - columnOrdering[numTriangularRows] = temp; - - temp = nnzInColumn[numTriangularRows]; - nnzInColumn[numTriangularRows] = nnzInColumn[i]; - nnzInColumn[i] = temp; - - DEBUG( foundNonZero = true ); - break; - } - } - - ASSERT( foundNonZero ); - - // Remove all entries under the diagonal entry from the row counters - for ( unsigned i = numTriangularRows + 1; i < m; ++i ) - { - if ( !FloatUtils::isZero( constraintMatrix[rowOrdering[i] * n + - columnOrdering[numTriangularRows]] ) ) - --nnzInRow[i]; - } - - ++numTriangularRows; - } - else - { - // No singleton rows. 
Exclude the densest column - unsigned maxDensity = nnzInColumn[numTriangularRows]; - unsigned column = numTriangularRows; - - for ( unsigned i = numTriangularRows; i < n - numExcluded; ++i ) - { - if ( nnzInColumn[i] > maxDensity ) - { - maxDensity = nnzInColumn[i]; - column = i; - } - } - - // Update the row counters to account for the excluded column - for ( unsigned i = numTriangularRows; i < m; ++i ) - { - double element = constraintMatrix[rowOrdering[i] * n + columnOrdering[column]]; - if ( !FloatUtils::isZero( element ) ) - { - ASSERT( nnzInRow[i] > 1 ); - --nnzInRow[i]; - } - } - - columnOrdering[column] = columnOrdering[n - 1 - numExcluded]; - nnzInColumn[column] = nnzInColumn[n - 1 - numExcluded]; - ++numExcluded; - } - } - - // Final basis: diagonalized columns + non-diagonalized rows - List result; - - for ( unsigned i = 0; i < numTriangularRows; ++i ) - { - initialBasis.append( columnOrdering[i] ); - } - - for ( unsigned i = numTriangularRows; i < m; ++i ) - { - basicRows.append( rowOrdering[i] ); - } - - // Cleanup - delete[] nnzInRow; - delete[] nnzInColumn; - delete[] columnOrdering; - delete[] rowOrdering; -} - -void Engine::addAuxiliaryVariables() -{ - List &equations( _preprocessedQuery->getEquations() ); - - unsigned m = equations.size(); - unsigned originalN = _preprocessedQuery->getNumberOfVariables(); - unsigned n = originalN + m; - - _preprocessedQuery->setNumberOfVariables( n ); - - // Add auxiliary variables to the equations and set their bounds - unsigned count = 0; - for ( auto &eq : equations ) - { - unsigned auxVar = originalN + count; - if ( _produceUNSATProofs ) - _preprocessedQuery->_lastAddendToAux.insert( eq._addends.back()._variable, auxVar ); - eq.addAddend( -1, auxVar ); - _preprocessedQuery->setLowerBound( auxVar, eq._scalar ); - _preprocessedQuery->setUpperBound( auxVar, eq._scalar ); - eq.setScalar( 0 ); - - ++count; - } -} - -void Engine::augmentInitialBasisIfNeeded( List &initialBasis, - const List &basicRows ) -{ - 
unsigned m = _preprocessedQuery->getEquations().size(); - unsigned n = _preprocessedQuery->getNumberOfVariables(); - unsigned originalN = n - m; - - if ( initialBasis.size() != m ) - { - for ( const auto &basicRow : basicRows ) - initialBasis.append( basicRow + originalN ); - } -} - -void Engine::initializeTableau( const double *constraintMatrix, const List &initialBasis ) -{ - const List &equations( _preprocessedQuery->getEquations() ); - unsigned m = equations.size(); - unsigned n = _preprocessedQuery->getNumberOfVariables(); - - _tableau->setDimensions( m, n ); - - adjustWorkMemorySize(); - - unsigned equationIndex = 0; - for ( const auto &equation : equations ) - { - _tableau->setRightHandSide( equationIndex, equation._scalar ); - ++equationIndex; - } - - // Populate constriant matrix - _tableau->setConstraintMatrix( constraintMatrix ); - - _tableau->registerToWatchAllVariables( _rowBoundTightener ); - _tableau->registerResizeWatcher( _rowBoundTightener ); - - _rowBoundTightener->setDimensions(); - - initializeBoundsAndConstraintWatchersInTableau( n ); - - _tableau->initializeTableau( initialBasis ); - - _costFunctionManager->initialize(); - _tableau->registerCostFunctionManager( _costFunctionManager ); - _activeEntryStrategy->initialize( _tableau ); -} - -void Engine::initializeBoundsAndConstraintWatchersInTableau( unsigned numberOfVariables ) -{ - _plConstraints = _preprocessedQuery->getPiecewiseLinearConstraints(); - for ( const auto &constraint : _plConstraints ) - { - constraint->registerAsWatcher( _tableau ); - constraint->setStatistics( &_statistics ); - - // Assuming aux var is use, add the constraint's auxiliary variable assigned to it in the - // tableau, to the constraint - if ( _produceUNSATProofs ) - for ( unsigned var : constraint->getNativeAuxVars() ) - if ( _preprocessedQuery->_lastAddendToAux.exists( var ) ) - constraint->addTableauAuxVar( _preprocessedQuery->_lastAddendToAux.at( var ), - var ); - } - - _nlConstraints = 
_preprocessedQuery->getNonlinearConstraints(); - for ( const auto &constraint : _nlConstraints ) - { - constraint->registerAsWatcher( _tableau ); - constraint->setStatistics( &_statistics ); - } - - for ( unsigned i = 0; i < numberOfVariables; ++i ) - { - _tableau->setLowerBound( i, _preprocessedQuery->getLowerBound( i ) ); - _tableau->setUpperBound( i, _preprocessedQuery->getUpperBound( i ) ); - } - _boundManager.storeLocalBounds(); - - _statistics.setUnsignedAttribute( Statistics::NUM_PL_CONSTRAINTS, _plConstraints.size() ); -} - -void Engine::initializeNetworkLevelReasoning() -{ - _networkLevelReasoner = _preprocessedQuery->getNetworkLevelReasoner(); - - if ( _networkLevelReasoner ) - { - _networkLevelReasoner->computeSuccessorLayers(); - _networkLevelReasoner->setTableau( _tableau ); - if ( Options::get()->getBool( Options::DUMP_TOPOLOGY ) ) - { - _networkLevelReasoner->dumpTopology( false ); - std::cout << std::endl; - } - } -} - -bool Engine::processInputQuery( const IQuery &inputQuery, bool preprocess ) -{ - ENGINE_LOG( "processInputQuery starting\n" ); - struct timespec start = TimeUtils::sampleMicro(); - - try - { - invokePreprocessor( inputQuery, preprocess ); - if ( _verbosity > 1 ) - printInputBounds( inputQuery ); - initializeNetworkLevelReasoning(); - if ( preprocess ) - { - performSymbolicBoundTightening( &( *_preprocessedQuery ) ); - performSimulation(); - performMILPSolverBoundedTightening( &( *_preprocessedQuery ) ); - performAdditionalBackwardAnalysisIfNeeded(); - } - - if ( GlobalConfiguration::PL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING ) - for ( auto &plConstraint : _preprocessedQuery->getPiecewiseLinearConstraints() ) - plConstraint->addAuxiliaryEquationsAfterPreprocessing( *_preprocessedQuery ); - - if ( GlobalConfiguration::NL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING ) - for ( auto &nlConstraint : _preprocessedQuery->getNonlinearConstraints() ) - nlConstraint->addAuxiliaryEquationsAfterPreprocessing( *_preprocessedQuery ); 
- - if ( _produceUNSATProofs ) - { - for ( auto &plConstraint : _preprocessedQuery->getPiecewiseLinearConstraints() ) - { - if ( !UNSATCertificateUtils::getSupportedActivations().exists( - plConstraint->getType() ) ) - { - _produceUNSATProofs = false; - Options::get()->setBool( Options::PRODUCE_PROOFS, false ); - String activationType = - plConstraint->serializeToString().tokenize( "," ).back(); - printf( - "Turning off proof production since activation %s is not yet supported\n", - activationType.ascii() ); - break; - } - } - } - - if ( _lpSolverType == LPSolverType::NATIVE ) - { - double *constraintMatrix = createConstraintMatrix(); - removeRedundantEquations( constraintMatrix ); - - // The equations have changed, recreate the constraint matrix - delete[] constraintMatrix; - constraintMatrix = createConstraintMatrix(); - - List initialBasis; - List basicRows; - selectInitialVariablesForBasis( constraintMatrix, initialBasis, basicRows ); - addAuxiliaryVariables(); - augmentInitialBasisIfNeeded( initialBasis, basicRows ); - - storeEquationsInDegradationChecker(); - - // The equations have changed, recreate the constraint matrix - delete[] constraintMatrix; - constraintMatrix = createConstraintMatrix(); - - unsigned n = _preprocessedQuery->getNumberOfVariables(); - _boundManager.initialize( n ); - - initializeTableau( constraintMatrix, initialBasis ); - _boundManager.initializeBoundExplainer( n, _tableau->getM() ); - delete[] constraintMatrix; - - if ( _produceUNSATProofs ) - { - _UNSATCertificate = new UnsatCertificateNode( NULL, PiecewiseLinearCaseSplit() ); - _UNSATCertificateCurrentPointer->set( _UNSATCertificate ); - _UNSATCertificate->setVisited(); - _groundBoundManager.initialize( n ); - - for ( unsigned i = 0; i < n; ++i ) - { - _groundBoundManager.setUpperBound( i, _preprocessedQuery->getUpperBound( i ) ); - _groundBoundManager.setLowerBound( i, _preprocessedQuery->getLowerBound( i ) ); - } - } - } - else - { - ASSERT( _lpSolverType == LPSolverType::GUROBI 
); - - ASSERT( GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH == true ); - - if ( _verbosity > 0 ) - printf( "Using Gurobi to solve LP...\n" ); - - unsigned n = _preprocessedQuery->getNumberOfVariables(); - unsigned m = _preprocessedQuery->getEquations().size(); - // Only use BoundManager to store the bounds. - _boundManager.initialize( n ); - _tableau->setDimensions( m, n ); - initializeBoundsAndConstraintWatchersInTableau( n ); - } - - for ( const auto &constraint : _plConstraints ) - constraint->registerTableau( _tableau ); - for ( const auto &constraint : _nlConstraints ) - constraint->registerTableau( _tableau ); - - if ( _networkLevelReasoner && Options::get()->getBool( Options::DUMP_BOUNDS ) ) - _networkLevelReasoner->dumpBounds(); - - if ( GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH ) - { - _soiManager = std::unique_ptr( - new SumOfInfeasibilitiesManager( *_preprocessedQuery, *_tableau ) ); - _soiManager->setStatistics( &_statistics ); - } - - if ( GlobalConfiguration::WARM_START ) - warmStart(); - - decideBranchingHeuristics(); - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.setLongAttribute( Statistics::PREPROCESSING_TIME_MICRO, - TimeUtils::timePassed( start, end ) ); - - if ( !_tableau->allBoundsValid() ) - { - // Some variable bounds are invalid, so the query is unsat - throw InfeasibleQueryException(); - } - } - catch ( const InfeasibleQueryException & ) - { - ENGINE_LOG( "processInputQuery done\n" ); - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.setLongAttribute( Statistics::PREPROCESSING_TIME_MICRO, - TimeUtils::timePassed( start, end ) ); - - _exitCode = Engine::UNSAT; - return false; - } - - ENGINE_LOG( "processInputQuery done\n" ); - - DEBUG( { - // Initially, all constraints should be active - for ( const auto &plc : _plConstraints ) - { - ASSERT( plc->isActive() ); - } - } ); - - _smtCore.storeDebuggingSolution( _preprocessedQuery->_debuggingSolution ); - return true; -} - -void 
Engine::performMILPSolverBoundedTightening( Query *inputQuery ) -{ - if ( _networkLevelReasoner && Options::get()->gurobiEnabled() ) - { - // Obtain from and store bounds into inputquery if it is not null. - if ( inputQuery ) - _networkLevelReasoner->obtainCurrentBounds( *inputQuery ); - else - _networkLevelReasoner->obtainCurrentBounds(); - - // TODO: Remove this block after getting ready to support sigmoid with MILP Bound - // Tightening. - if ( ( _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::MILP_ENCODING || - _milpSolverBoundTighteningType == - MILPSolverBoundTighteningType::MILP_ENCODING_INCREMENTAL || - _milpSolverBoundTighteningType == - MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION ) && - _preprocessedQuery->getNonlinearConstraints().size() > 0 ) - throw MarabouError( MarabouError::FEATURE_NOT_YET_SUPPORTED, - "Marabou doesn't support sigmoid with MILP Bound Tightening" ); - - switch ( _milpSolverBoundTighteningType ) - { - case MILPSolverBoundTighteningType::LP_RELAXATION: - case MILPSolverBoundTighteningType::LP_RELAXATION_INCREMENTAL: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX: - _networkLevelReasoner->lpRelaxationPropagation(); - break; - case MILPSolverBoundTighteningType::MILP_ENCODING: - case MILPSolverBoundTighteningType::MILP_ENCODING_INCREMENTAL: - _networkLevelReasoner->MILPPropagation(); - break; - case MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION: - _networkLevelReasoner->iterativePropagation(); - break; - case MILPSolverBoundTighteningType::NONE: - return; - } - List tightenings; - _networkLevelReasoner->getConstraintTightenings( tightenings ); - - - if ( inputQuery ) - { - for ( const auto &tightening : tightenings ) - { - if ( tightening._type == Tightening::LB && - FloatUtils::gt( 
tightening._value, - inputQuery->getLowerBound( tightening._variable ) ) ) - inputQuery->setLowerBound( tightening._variable, tightening._value ); - if ( tightening._type == Tightening::UB && - FloatUtils::lt( tightening._value, - inputQuery->getUpperBound( tightening._variable ) ) ) - inputQuery->setUpperBound( tightening._variable, tightening._value ); - } - } - else - { - for ( const auto &tightening : tightenings ) - { - if ( tightening._type == Tightening::LB ) - _tableau->tightenLowerBound( tightening._variable, tightening._value ); - - else if ( tightening._type == Tightening::UB ) - _tableau->tightenUpperBound( tightening._variable, tightening._value ); - } - } - } -} - -void Engine::performAdditionalBackwardAnalysisIfNeeded() -{ - if ( _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE || - _milpSolverBoundTighteningType == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE ) - { - unsigned tightened = performSymbolicBoundTightening( &( *_preprocessedQuery ) ); - if ( _verbosity > 0 ) - printf( "Backward analysis tightened %u bounds\n", tightened ); - while ( tightened && - _milpSolverBoundTighteningType == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE && - GlobalConfiguration::MAX_ROUNDS_OF_BACKWARD_ANALYSIS ) - { - performMILPSolverBoundedTightening( &( *_preprocessedQuery ) ); - tightened = performSymbolicBoundTightening( &( *_preprocessedQuery ) ); - if ( _verbosity > 0 ) - printf( "Backward analysis tightened %u bounds\n", tightened ); - } - } -} - -void Engine::performMILPSolverBoundedTighteningForSingleLayer( unsigned targetIndex ) -{ - if ( _produceUNSATProofs ) - return; - - if ( _networkLevelReasoner && _isGurobyEnabled && _performLpTighteningAfterSplit && - _milpSolverBoundTighteningType != MILPSolverBoundTighteningType::NONE ) - { - _networkLevelReasoner->obtainCurrentBounds(); - _networkLevelReasoner->clearConstraintTightenings(); - - switch ( _milpSolverBoundTighteningType ) - { - 
case MILPSolverBoundTighteningType::LP_RELAXATION: - _networkLevelReasoner->LPTighteningForOneLayer( targetIndex ); - break; - case MILPSolverBoundTighteningType::LP_RELAXATION_INCREMENTAL: - return; - case MILPSolverBoundTighteningType::MILP_ENCODING: - _networkLevelReasoner->MILPTighteningForOneLayer( targetIndex ); - break; - case MILPSolverBoundTighteningType::MILP_ENCODING_INCREMENTAL: - return; - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX: - case MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION: - case MILPSolverBoundTighteningType::NONE: - return; - } - List tightenings; - _networkLevelReasoner->getConstraintTightenings( tightenings ); - - for ( const auto &tightening : tightenings ) - { - if ( tightening._type == Tightening::LB ) - _tableau->tightenLowerBound( tightening._variable, tightening._value ); - - else if ( tightening._type == Tightening::UB ) - _tableau->tightenUpperBound( tightening._variable, tightening._value ); - } - } -} - -void Engine::extractSolution( IQuery &inputQuery, Preprocessor *preprocessor ) -{ - Preprocessor *preprocessorInUse = nullptr; - if ( preprocessor != nullptr ) - preprocessorInUse = preprocessor; - else if ( _preprocessingEnabled ) - preprocessorInUse = &_preprocessor; - - for ( unsigned i = 0; i < inputQuery.getNumberOfVariables(); ++i ) - { - if ( preprocessorInUse ) - { - // Symbolically fixed variables are skipped. They will be re-constructed in the end. - if ( preprocessorInUse->variableIsUnusedAndSymbolicallyFixed( i ) ) - continue; - - // Has the variable been merged into another? - unsigned variable = i; - while ( preprocessorInUse->variableIsMerged( variable ) ) - variable = preprocessorInUse->getMergedIndex( variable ); - - // Fixed variables are easy: return the value they've been fixed to. 
- if ( preprocessorInUse->variableIsFixed( variable ) ) - { - inputQuery.setSolutionValue( i, preprocessorInUse->getFixedValue( variable ) ); - continue; - } - - // We know which variable to look for, but it may have been assigned - // a new index, due to variable elimination - variable = preprocessorInUse->getNewIndex( variable ); - - // Finally, set the assigned value - inputQuery.setSolutionValue( i, _tableau->getValue( variable ) ); - } - else - { - inputQuery.setSolutionValue( i, _tableau->getValue( i ) ); - } - } - - if ( preprocessorInUse ) - { - /* - Recover the assignment of eliminated neurons (e.g., due to merging of WS layers) - */ - preprocessorInUse->setSolutionValuesOfEliminatedNeurons( inputQuery ); - } -} - -bool Engine::allVarsWithinBounds() const -{ - if ( _lpSolverType == LPSolverType::GUROBI ) - { - ASSERT( _gurobi ); - return _gurobi->haveFeasibleSolution(); - } - else - return !_tableau->existsBasicOutOfBounds(); -} - -void Engine::collectViolatedPlConstraints() -{ - _violatedPlConstraints.clear(); - for ( const auto &constraint : _plConstraints ) - { - if ( constraint->isActive() && !constraint->satisfied() ) - _violatedPlConstraints.append( constraint ); - } -} - -bool Engine::allPlConstraintsHold() -{ - return _violatedPlConstraints.empty(); -} - -bool Engine::allNonlinearConstraintsHold() -{ - for ( const auto &constraint : _nlConstraints ) - { - if ( !constraint->satisfied() ) - return false; - } - return true; -} - -bool Engine::hasBranchingCandidate() -{ - for ( const auto &constraint : _plConstraints ) - { - if ( constraint->isActive() && !constraint->phaseFixed() ) - return true; - } - return false; -} - -void Engine::selectViolatedPlConstraint() -{ - ASSERT( !_violatedPlConstraints.empty() ); - - _plConstraintToFix = _smtCore.chooseViolatedConstraintForFixing( _violatedPlConstraints ); - - ASSERT( _plConstraintToFix ); -} - -void Engine::reportPlViolation() -{ - _smtCore.reportViolatedConstraint( _plConstraintToFix ); -} - -void 
Engine::storeState( EngineState &state, TableauStateStorageLevel level ) const -{ - _tableau->storeState( state._tableauState, level ); - state._tableauStateStorageLevel = level; - - for ( const auto &constraint : _plConstraints ) - state._plConstraintToState[constraint] = constraint->duplicateConstraint(); - - state._numPlConstraintsDisabledByValidSplits = _numPlConstraintsDisabledByValidSplits; -} - -void Engine::restoreState( const EngineState &state ) -{ - ENGINE_LOG( "Restore state starting" ); - - if ( state._tableauStateStorageLevel == TableauStateStorageLevel::STORE_NONE ) - throw MarabouError( MarabouError::RESTORING_ENGINE_FROM_INVALID_STATE ); - - ENGINE_LOG( "\tRestoring tableau state" ); - _tableau->restoreState( state._tableauState, state._tableauStateStorageLevel ); - - ENGINE_LOG( "\tRestoring constraint states" ); - for ( auto &constraint : _plConstraints ) - { - if ( !state._plConstraintToState.exists( constraint ) ) - throw MarabouError( MarabouError::MISSING_PL_CONSTRAINT_STATE ); - - constraint->restoreState( state._plConstraintToState[constraint] ); - } - - _numPlConstraintsDisabledByValidSplits = state._numPlConstraintsDisabledByValidSplits; - - if ( _lpSolverType == LPSolverType::NATIVE ) - { - // Make sure the data structures are initialized to the correct size - _rowBoundTightener->setDimensions(); - adjustWorkMemorySize(); - _activeEntryStrategy->resizeHook( _tableau ); - _costFunctionManager->initialize(); - } - - // Reset the violation counts in the SMT core - _smtCore.resetSplitConditions(); -} - -void Engine::setNumPlConstraintsDisabledByValidSplits( unsigned numConstraints ) -{ - _numPlConstraintsDisabledByValidSplits = numConstraints; -} - -bool Engine::attemptToMergeVariables( unsigned x1, unsigned x2 ) -{ - /* - First, we need to ensure that the variables are both non-basic. 
- */ - - unsigned n = _tableau->getN(); - unsigned m = _tableau->getM(); - - if ( _tableau->isBasic( x1 ) ) - { - TableauRow x1Row( n - m ); - _tableau->getTableauRow( _tableau->variableToIndex( x1 ), &x1Row ); - - bool found = false; - double bestCoefficient = 0.0; - unsigned nonBasic = 0; - for ( unsigned i = 0; i < n - m; ++i ) - { - if ( x1Row._row[i]._var != x2 ) - { - double contender = FloatUtils::abs( x1Row._row[i]._coefficient ); - if ( FloatUtils::gt( contender, bestCoefficient ) ) - { - found = true; - nonBasic = x1Row._row[i]._var; - bestCoefficient = contender; - } - } - } - - if ( !found ) - return false; - - _tableau->setEnteringVariableIndex( _tableau->variableToIndex( nonBasic ) ); - _tableau->setLeavingVariableIndex( _tableau->variableToIndex( x1 ) ); - - // Make sure the change column and pivot row are up-to-date - strategies - // such as projected steepest edge need these for their internal updates. - _tableau->computeChangeColumn(); - _tableau->computePivotRow(); - - _activeEntryStrategy->prePivotHook( _tableau, false ); - _tableau->performDegeneratePivot(); - _activeEntryStrategy->postPivotHook( _tableau, false ); - } - - if ( _tableau->isBasic( x2 ) ) - { - TableauRow x2Row( n - m ); - _tableau->getTableauRow( _tableau->variableToIndex( x2 ), &x2Row ); - - bool found = false; - double bestCoefficient = 0.0; - unsigned nonBasic = 0; - for ( unsigned i = 0; i < n - m; ++i ) - { - if ( x2Row._row[i]._var != x1 ) - { - double contender = FloatUtils::abs( x2Row._row[i]._coefficient ); - if ( FloatUtils::gt( contender, bestCoefficient ) ) - { - found = true; - nonBasic = x2Row._row[i]._var; - bestCoefficient = contender; - } - } - } - - if ( !found ) - return false; - - _tableau->setEnteringVariableIndex( _tableau->variableToIndex( nonBasic ) ); - _tableau->setLeavingVariableIndex( _tableau->variableToIndex( x2 ) ); - - // Make sure the change column and pivot row are up-to-date - strategies - // such as projected steepest edge need these for their 
internal updates. - _tableau->computeChangeColumn(); - _tableau->computePivotRow(); - - _activeEntryStrategy->prePivotHook( _tableau, false ); - _tableau->performDegeneratePivot(); - _activeEntryStrategy->postPivotHook( _tableau, false ); - } - - // Both variables are now non-basic, so we can merge their columns - _tableau->mergeColumns( x1, x2 ); - DEBUG( _tableau->verifyInvariants() ); - - // Reset the entry strategy - _activeEntryStrategy->initialize( _tableau ); - - return true; -} - -void Engine::applySplit( const PiecewiseLinearCaseSplit &split ) -{ - ENGINE_LOG( "" ); - ENGINE_LOG( "Applying a split. " ); - - DEBUG( _tableau->verifyInvariants() ); - - List bounds = split.getBoundTightenings(); - List equations = split.getEquations(); - - // We assume that case splits only apply new bounds but do not apply - // new equations. This can always be made possible. - if ( _lpSolverType != LPSolverType::NATIVE && equations.size() > 0 ) - throw MarabouError( MarabouError::FEATURE_NOT_YET_SUPPORTED, - "Can only update bounds when using non-native" - "simplex engine!" ); - - for ( auto &equation : equations ) - { - /* - First, adjust the equation if any variables have been merged. 
- E.g., if the equation is x1 + x2 + x3 = 0, and x1 and x2 have been - merged, the equation becomes 2x1 + x3 = 0 - */ - for ( auto &addend : equation._addends ) - addend._variable = _tableau->getVariableAfterMerging( addend._variable ); - - List::iterator addend; - List::iterator otherAddend; - - addend = equation._addends.begin(); - while ( addend != equation._addends.end() ) - { - otherAddend = addend; - ++otherAddend; - - while ( otherAddend != equation._addends.end() ) - { - if ( otherAddend->_variable == addend->_variable ) - { - addend->_coefficient += otherAddend->_coefficient; - otherAddend = equation._addends.erase( otherAddend ); - } - else - ++otherAddend; - } - - if ( FloatUtils::isZero( addend->_coefficient ) ) - addend = equation._addends.erase( addend ); - else - ++addend; - } - - /* - In the general case, we just add the new equation to the tableau. - However, we also support a very common case: equations of the form - x1 = x2, which are common, e.g., with ReLUs. For these equations we - may be able to merge two columns of the tableau. 
- */ - unsigned x1, x2; - bool canMergeColumns = - // Only if the flag is on - GlobalConfiguration::USE_COLUMN_MERGING_EQUATIONS && - // Only if the equation has the correct form - equation.isVariableMergingEquation( x1, x2 ) && - // And only if the variables are not out of bounds - ( !_tableau->isBasic( x1 ) || - !_tableau->basicOutOfBounds( _tableau->variableToIndex( x1 ) ) ) && - ( !_tableau->isBasic( x2 ) || - !_tableau->basicOutOfBounds( _tableau->variableToIndex( x2 ) ) ); - - bool columnsSuccessfullyMerged = false; - if ( canMergeColumns ) - columnsSuccessfullyMerged = attemptToMergeVariables( x1, x2 ); - - if ( !columnsSuccessfullyMerged ) - { - // General case: add a new equation to the tableau - unsigned auxVariable = _tableau->addEquation( equation ); - _activeEntryStrategy->resizeHook( _tableau ); - - switch ( equation._type ) - { - case Equation::GE: - bounds.append( Tightening( auxVariable, 0.0, Tightening::UB ) ); - break; - - case Equation::LE: - bounds.append( Tightening( auxVariable, 0.0, Tightening::LB ) ); - break; - - case Equation::EQ: - bounds.append( Tightening( auxVariable, 0.0, Tightening::LB ) ); - bounds.append( Tightening( auxVariable, 0.0, Tightening::UB ) ); - break; - - default: - ASSERT( false ); - break; - } - } - } - - if ( _lpSolverType == LPSolverType::NATIVE ) - { - adjustWorkMemorySize(); - } - - for ( auto &bound : bounds ) - { - unsigned variable = _tableau->getVariableAfterMerging( bound._variable ); - - if ( bound._type == Tightening::LB ) - { - ENGINE_LOG( - Stringf( "x%u: lower bound set to %.3lf", variable, bound._value ).ascii() ); - if ( _produceUNSATProofs && - FloatUtils::gt( bound._value, _boundManager.getLowerBound( bound._variable ) ) ) - { - _boundManager.resetExplanation( variable, Tightening::LB ); - updateGroundLowerBound( variable, bound._value ); - _boundManager.tightenLowerBound( variable, bound._value ); - } - else if ( !_produceUNSATProofs ) - _boundManager.tightenLowerBound( variable, bound._value ); - 
} - else - { - ENGINE_LOG( - Stringf( "x%u: upper bound set to %.3lf", variable, bound._value ).ascii() ); - if ( _produceUNSATProofs && - FloatUtils::lt( bound._value, _boundManager.getUpperBound( bound._variable ) ) ) - { - _boundManager.resetExplanation( variable, Tightening::UB ); - updateGroundUpperBound( variable, bound._value ); - _boundManager.tightenUpperBound( variable, bound._value ); - } - else if ( !_produceUNSATProofs ) - _boundManager.tightenUpperBound( variable, bound._value ); - } - } - - if ( _produceUNSATProofs && _UNSATCertificateCurrentPointer ) - ( **_UNSATCertificateCurrentPointer ).setVisited(); - - DEBUG( _tableau->verifyInvariants() ); - ENGINE_LOG( "Done with split\n" ); -} - -void Engine::applyBoundTightenings() -{ - List tightenings; - _boundManager.getTightenings( tightenings ); - - for ( const auto &tightening : tightenings ) - { - if ( tightening._type == Tightening::LB ) - _tableau->tightenLowerBound( tightening._variable, tightening._value ); - else - _tableau->tightenUpperBound( tightening._variable, tightening._value ); - } -} - -void Engine::applyAllRowTightenings() -{ - applyBoundTightenings(); -} - -void Engine::applyAllConstraintTightenings() -{ - applyBoundTightenings(); -} - -void Engine::applyAllBoundTightenings() -{ - struct timespec start = TimeUtils::sampleMicro(); - - if ( _lpSolverType == LPSolverType::NATIVE ) - applyAllRowTightenings(); - applyAllConstraintTightenings(); - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TOTAL_TIME_APPLYING_STORED_TIGHTENINGS_MICRO, - TimeUtils::timePassed( start, end ) ); -} - -bool Engine::applyAllValidConstraintCaseSplits() -{ - struct timespec start = TimeUtils::sampleMicro(); - - bool appliedSplit = false; - for ( auto &constraint : _plConstraints ) - if ( applyValidConstraintCaseSplit( constraint ) ) - appliedSplit = true; - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( 
Statistics::TOTAL_TIME_PERFORMING_VALID_CASE_SPLITS_MICRO, - TimeUtils::timePassed( start, end ) ); - - return appliedSplit; -} - -bool Engine::applyValidConstraintCaseSplit( PiecewiseLinearConstraint *constraint ) -{ - if ( constraint->isActive() && constraint->phaseFixed() ) - { - String constraintString; - constraint->dump( constraintString ); - ENGINE_LOG( Stringf( "A constraint has become valid. Dumping constraint: %s", - constraintString.ascii() ) - .ascii() ); - - constraint->setActiveConstraint( false ); - PiecewiseLinearCaseSplit validSplit = constraint->getValidCaseSplit(); - _smtCore.recordImpliedValidSplit( validSplit ); - applySplit( validSplit ); - - if ( _soiManager ) - _soiManager->removeCostComponentFromHeuristicCost( constraint ); - ++_numPlConstraintsDisabledByValidSplits; - - return true; - } - - return false; -} - -bool Engine::shouldCheckDegradation() -{ - return _statistics.getLongAttribute( Statistics::NUM_MAIN_LOOP_ITERATIONS ) % - GlobalConfiguration::DEGRADATION_CHECKING_FREQUENCY == - 0; -} - -bool Engine::highDegradation() -{ - struct timespec start = TimeUtils::sampleMicro(); - - double degradation = _degradationChecker.computeDegradation( *_tableau ); - _statistics.setDoubleAttribute( Statistics::CURRENT_DEGRADATION, degradation ); - if ( FloatUtils::gt( degradation, - _statistics.getDoubleAttribute( Statistics::MAX_DEGRADATION ) ) ) - _statistics.setDoubleAttribute( Statistics::MAX_DEGRADATION, degradation ); - - bool result = FloatUtils::gt( degradation, GlobalConfiguration::DEGRADATION_THRESHOLD ); - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TOTAL_TIME_DEGRADATION_CHECKING, - TimeUtils::timePassed( start, end ) ); - - // Debug - if ( result ) - printf( "High degradation found!\n" ); - // - - return result; -} - -void Engine::tightenBoundsOnConstraintMatrix() -{ - struct timespec start = TimeUtils::sampleMicro(); - - if ( _statistics.getLongAttribute( 
Statistics::NUM_MAIN_LOOP_ITERATIONS ) % - GlobalConfiguration::BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY == - 0 ) - { - _rowBoundTightener->examineConstraintMatrix( true ); - _statistics.incLongAttribute( Statistics::NUM_BOUND_TIGHTENINGS_ON_CONSTRAINT_MATRIX ); - } - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TOTAL_TIME_CONSTRAINT_MATRIX_BOUND_TIGHTENING_MICRO, - TimeUtils::timePassed( start, end ) ); -} - -void Engine::explicitBasisBoundTightening() -{ - struct timespec start = TimeUtils::sampleMicro(); - - bool saturation = GlobalConfiguration::EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION; - - _statistics.incLongAttribute( Statistics::NUM_BOUND_TIGHTENINGS_ON_EXPLICIT_BASIS ); - - switch ( GlobalConfiguration::EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE ) - { - case GlobalConfiguration::COMPUTE_INVERTED_BASIS_MATRIX: - _rowBoundTightener->examineInvertedBasisMatrix( saturation ); - break; - - case GlobalConfiguration::USE_IMPLICIT_INVERTED_BASIS_MATRIX: - _rowBoundTightener->examineImplicitInvertedBasisMatrix( saturation ); - break; - - case GlobalConfiguration::DISABLE_EXPLICIT_BASIS_TIGHTENING: - break; - } - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TOTAL_TIME_EXPLICIT_BASIS_BOUND_TIGHTENING_MICRO, - TimeUtils::timePassed( start, end ) ); -} - -void Engine::performPrecisionRestoration( PrecisionRestorer::RestoreBasics restoreBasics ) -{ - struct timespec start = TimeUtils::sampleMicro(); - - // debug - double before = _degradationChecker.computeDegradation( *_tableau ); - // - - _precisionRestorer.restorePrecision( *this, *_tableau, _smtCore, restoreBasics ); - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TOTAL_TIME_PRECISION_RESTORATION, - TimeUtils::timePassed( start, end ) ); - - _statistics.incUnsignedAttribute( Statistics::NUM_PRECISION_RESTORATIONS ); - - // debug - double after = 
_degradationChecker.computeDegradation( *_tableau ); - if ( _verbosity > 0 ) - printf( "Performing precision restoration. Degradation before: %.15lf. After: %.15lf\n", - before, - after ); - // - - if ( highDegradation() && ( restoreBasics == PrecisionRestorer::RESTORE_BASICS ) ) - { - // First round, with basic restoration, still resulted in high degradation. - // Try again! - start = TimeUtils::sampleMicro(); - _precisionRestorer.restorePrecision( - *this, *_tableau, _smtCore, PrecisionRestorer::DO_NOT_RESTORE_BASICS ); - end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TOTAL_TIME_PRECISION_RESTORATION, - TimeUtils::timePassed( start, end ) ); - _statistics.incUnsignedAttribute( Statistics::NUM_PRECISION_RESTORATIONS ); - - // debug - double afterSecond = _degradationChecker.computeDegradation( *_tableau ); - if ( _verbosity > 0 ) - printf( - "Performing 2nd precision restoration. Degradation before: %.15lf. After: %.15lf\n", - after, - afterSecond ); - - if ( highDegradation() ) - throw MarabouError( MarabouError::RESTORATION_FAILED_TO_RESTORE_PRECISION ); - } -} - -void Engine::storeInitialEngineState() -{ - if ( !_initialStateStored ) - { - _precisionRestorer.storeInitialEngineState( *this ); - _initialStateStored = true; - } -} - -bool Engine::basisRestorationNeeded() const -{ - return _basisRestorationRequired == Engine::STRONG_RESTORATION_NEEDED || - _basisRestorationRequired == Engine::WEAK_RESTORATION_NEEDED; -} - -const Statistics *Engine::getStatistics() const -{ - return &_statistics; -} - -Query *Engine::getQuery() -{ - return &( *_preprocessedQuery ); -} - -void Engine::checkBoundCompliancyWithDebugSolution() -{ - if ( _smtCore.checkSkewFromDebuggingSolution() ) - { - // The stack is compliant, we should not have learned any non-compliant bounds - for ( const auto &var : _preprocessedQuery->_debuggingSolution ) - { - // printf( "Looking at var %u\n", var.first ); - - if ( FloatUtils::gt( _tableau->getLowerBound( var.first 
), var.second, 1e-5 ) ) - { - printf( "Error! The stack is compliant, but learned an non-compliant bound: " - "Solution for x%u is %.15lf, but learned lower bound %.15lf\n", - var.first, - var.second, - _tableau->getLowerBound( var.first ) ); - - throw MarabouError( MarabouError::DEBUGGING_ERROR ); - } - - if ( FloatUtils::lt( _tableau->getUpperBound( var.first ), var.second, 1e-5 ) ) - { - printf( "Error! The stack is compliant, but learned an non-compliant bound: " - "Solution for %u is %.15lf, but learned upper bound %.15lf\n", - var.first, - var.second, - _tableau->getUpperBound( var.first ) ); - - throw MarabouError( MarabouError::DEBUGGING_ERROR ); - } - } - } -} - -void Engine::quitSignal() -{ - _quitRequested = true; -} - -Engine::ExitCode Engine::getExitCode() const -{ - return _exitCode; -} - -std::atomic_bool *Engine::getQuitRequested() -{ - return &_quitRequested; -} - -List Engine::getInputVariables() const -{ - return _preprocessedQuery->getInputVariables(); -} - -void Engine::performSimulation() -{ - if ( _simulationSize == 0 || !_networkLevelReasoner || - _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::NONE || - _produceUNSATProofs ) - { - ENGINE_LOG( Stringf( "Skip simulation..." 
).ascii() ); - return; - } - - // outer vector is for neuron - // inner vector is for simulation value - Vector> simulations; - - std::mt19937 mt( GlobalConfiguration::SIMULATION_RANDOM_SEED ); - - for ( unsigned i = 0; i < _networkLevelReasoner->getLayer( 0 )->getSize(); ++i ) - { - std::uniform_real_distribution distribution( - _networkLevelReasoner->getLayer( 0 )->getLb( i ), - _networkLevelReasoner->getLayer( 0 )->getUb( i ) ); - Vector simulationInput( _simulationSize ); - - for ( unsigned j = 0; j < _simulationSize; ++j ) - simulationInput[j] = distribution( mt ); - simulations.append( simulationInput ); - } - _networkLevelReasoner->simulate( &simulations ); -} - -unsigned Engine::performSymbolicBoundTightening( Query *inputQuery ) -{ - if ( _symbolicBoundTighteningType == SymbolicBoundTighteningType::NONE || - ( !_networkLevelReasoner ) || _produceUNSATProofs ) - return 0; - - struct timespec start = TimeUtils::sampleMicro(); - - unsigned numTightenedBounds = 0; - - // Step 1: tell the NLR about the current bounds - if ( inputQuery ) - { - // Obtain from and store bounds into inputquery if it is not null. - _networkLevelReasoner->obtainCurrentBounds( *inputQuery ); - } - else - { - // Get bounds from Tableau. 
- _networkLevelReasoner->obtainCurrentBounds(); - } - - // Step 2: perform SBT - if ( _symbolicBoundTighteningType == SymbolicBoundTighteningType::SYMBOLIC_BOUND_TIGHTENING ) - _networkLevelReasoner->symbolicBoundPropagation(); - else if ( _symbolicBoundTighteningType == SymbolicBoundTighteningType::DEEP_POLY ) - _networkLevelReasoner->deepPolyPropagation(); - - // Step 3: Extract the bounds - List tightenings; - _networkLevelReasoner->getConstraintTightenings( tightenings ); - - if ( inputQuery ) - { - for ( const auto &tightening : tightenings ) - { - if ( tightening._type == Tightening::LB && - FloatUtils::gt( tightening._value, - inputQuery->getLowerBound( tightening._variable ) ) ) - { - inputQuery->setLowerBound( tightening._variable, tightening._value ); - ++numTightenedBounds; - } - - if ( tightening._type == Tightening::UB && - FloatUtils::lt( tightening._value, - inputQuery->getUpperBound( tightening._variable ) ) ) - { - inputQuery->setUpperBound( tightening._variable, tightening._value ); - ++numTightenedBounds; - } - } - } - else - { - for ( const auto &tightening : tightenings ) - { - if ( tightening._type == Tightening::LB && - FloatUtils::gt( tightening._value, - _tableau->getLowerBound( tightening._variable ) ) ) - { - _tableau->tightenLowerBound( tightening._variable, tightening._value ); - ++numTightenedBounds; - } - - if ( tightening._type == Tightening::UB && - FloatUtils::lt( tightening._value, - _tableau->getUpperBound( tightening._variable ) ) ) - { - _tableau->tightenUpperBound( tightening._variable, tightening._value ); - ++numTightenedBounds; - } - } - } - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TOTAL_TIME_PERFORMING_SYMBOLIC_BOUND_TIGHTENING, - TimeUtils::timePassed( start, end ) ); - _statistics.incLongAttribute( Statistics::NUM_TIGHTENINGS_FROM_SYMBOLIC_BOUND_TIGHTENING, - numTightenedBounds ); - return numTightenedBounds; -} - -bool Engine::shouldExitDueToTimeout( double timeout ) 
const -{ - // A timeout value of 0 means no time limit - if ( timeout == 0 ) - return false; - - return static_cast( _statistics.getTotalTimeInMicro() ) / MICROSECONDS_TO_SECONDS > - timeout; -} - -void Engine::preContextPushHook() -{ - struct timespec start = TimeUtils::sampleMicro(); - _boundManager.storeLocalBounds(); - if ( _produceUNSATProofs ) - _groundBoundManager.storeLocalBounds(); - struct timespec end = TimeUtils::sampleMicro(); - - _statistics.incLongAttribute( Statistics::TIME_CONTEXT_PUSH_HOOK, - TimeUtils::timePassed( start, end ) ); -} - -void Engine::postContextPopHook() -{ - struct timespec start = TimeUtils::sampleMicro(); - - _boundManager.restoreLocalBounds(); - if ( _produceUNSATProofs ) - _groundBoundManager.restoreLocalBounds(); - _tableau->postContextPopHook(); - - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TIME_CONTEXT_POP_HOOK, - TimeUtils::timePassed( start, end ) ); -} - -void Engine::reset() -{ - resetStatistics(); - _sncMode = false; - clearViolatedPLConstraints(); - resetSmtCore(); - resetBoundTighteners(); - resetExitCode(); -} - -void Engine::resetStatistics() -{ - Statistics statistics; - _statistics = statistics; - _smtCore.setStatistics( &_statistics ); - _tableau->setStatistics( &_statistics ); - _rowBoundTightener->setStatistics( &_statistics ); - _preprocessor.setStatistics( &_statistics ); - _activeEntryStrategy->setStatistics( &_statistics ); - - _statistics.stampStartingTime(); -} - -void Engine::clearViolatedPLConstraints() -{ - _violatedPlConstraints.clear(); - _plConstraintToFix = NULL; -} - -void Engine::resetSmtCore() -{ - _smtCore.reset(); - _smtCore.initializeScoreTrackerIfNeeded( _plConstraints ); -} - -void Engine::resetExitCode() -{ - _exitCode = Engine::NOT_DONE; -} - -void Engine::resetBoundTighteners() -{ -} - -void Engine::warmStart() -{ - // An NLR is required for a warm start - if ( !_networkLevelReasoner ) - return; - - // First, choose an arbitrary 
assignment for the input variables - unsigned numInputVariables = _preprocessedQuery->getNumInputVariables(); - unsigned numOutputVariables = _preprocessedQuery->getNumOutputVariables(); - - if ( numInputVariables == 0 ) - { - // Trivial case: all inputs are fixed, nothing to evaluate - return; - } - - double *inputAssignment = new double[numInputVariables]; - double *outputAssignment = new double[numOutputVariables]; - - for ( unsigned i = 0; i < numInputVariables; ++i ) - { - unsigned variable = _preprocessedQuery->inputVariableByIndex( i ); - inputAssignment[i] = _tableau->getLowerBound( variable ); - } - - // Evaluate the network for this assignment - _networkLevelReasoner->evaluate( inputAssignment, outputAssignment ); - - // Try to update as many variables as possible to match their assignment - for ( unsigned i = 0; i < _networkLevelReasoner->getNumberOfLayers(); ++i ) - { - const NLR::Layer *layer = _networkLevelReasoner->getLayer( i ); - unsigned layerSize = layer->getSize(); - const double *assignment = layer->getAssignment(); - - for ( unsigned j = 0; j < layerSize; ++j ) - { - if ( layer->neuronHasVariable( j ) ) - { - unsigned variable = layer->neuronToVariable( j ); - if ( !_tableau->isBasic( variable ) ) - _tableau->setNonBasicAssignment( variable, assignment[j], false ); - } - } - } - - // We did what we could for the non-basics; now let the tableau compute - // the basic assignment - _tableau->computeAssignment(); - - delete[] outputAssignment; - delete[] inputAssignment; -} - -void Engine::checkOverallProgress() -{ - // Get fresh statistics - unsigned numVisitedStates = - _statistics.getUnsignedAttribute( Statistics::NUM_VISITED_TREE_STATES ); - unsigned long long currentIteration = - _statistics.getLongAttribute( Statistics::NUM_MAIN_LOOP_ITERATIONS ); - - if ( numVisitedStates > _lastNumVisitedStates ) - { - // Progress has been made - _lastNumVisitedStates = numVisitedStates; - _lastIterationWithProgress = currentIteration; - } - else - { - // 
No progress has been made. If it's been too long, request a restoration - if ( currentIteration > - _lastIterationWithProgress + GlobalConfiguration::MAX_ITERATIONS_WITHOUT_PROGRESS ) - { - ENGINE_LOG( - "checkOverallProgress detected cycling. Requesting a precision restoration" ); - _basisRestorationRequired = Engine::STRONG_RESTORATION_NEEDED; - _lastIterationWithProgress = currentIteration; - } - } -} - -void Engine::updateDirections() -{ - if ( GlobalConfiguration::USE_POLARITY_BASED_DIRECTION_HEURISTICS ) - for ( const auto &constraint : _plConstraints ) - if ( constraint->supportPolarity() && constraint->isActive() && - !constraint->phaseFixed() ) - constraint->updateDirection(); -} - -void Engine::decideBranchingHeuristics() -{ - DivideStrategy divideStrategy = Options::get()->getDivideStrategy(); - if ( divideStrategy == DivideStrategy::Auto ) - { - if ( !_produceUNSATProofs && !_preprocessedQuery->getInputVariables().empty() && - _preprocessedQuery->getInputVariables().size() < - GlobalConfiguration::INTERVAL_SPLITTING_THRESHOLD ) - { - // NOTE: the benefit of input splitting is minimal with abstract intepretation disabled. - // Therefore, since the proof production mode does not currently support that, we do - // not perform input-splitting in proof production mode. 
- divideStrategy = DivideStrategy::LargestInterval; - if ( _verbosity >= 2 ) - printf( "Branching heuristics set to LargestInterval\n" ); - } - else - { - if ( GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH ) - { - divideStrategy = DivideStrategy::PseudoImpact; - if ( _verbosity >= 2 ) - printf( "Branching heuristics set to PseudoImpact\n" ); - } - else - { - divideStrategy = DivideStrategy::ReLUViolation; - if ( _verbosity >= 2 ) - printf( "Branching heuristics set to ReLUViolation\n" ); - } - } - } - ASSERT( divideStrategy != DivideStrategy::Auto ); - _smtCore.setBranchingHeuristics( divideStrategy ); - _smtCore.initializeScoreTrackerIfNeeded( _plConstraints ); -} - -PiecewiseLinearConstraint *Engine::pickSplitPLConstraintBasedOnPolarity() -{ - ENGINE_LOG( Stringf( "Using Polarity-based heuristics..." ).ascii() ); - - if ( !_networkLevelReasoner ) - return NULL; - - List constraints = - _networkLevelReasoner->getConstraintsInTopologicalOrder(); - - Map scoreToConstraint; - for ( auto &plConstraint : constraints ) - { - if ( plConstraint->supportPolarity() && plConstraint->isActive() && - !plConstraint->phaseFixed() ) - { - plConstraint->updateScoreBasedOnPolarity(); - scoreToConstraint[plConstraint->getScore()] = plConstraint; - if ( scoreToConstraint.size() >= GlobalConfiguration::POLARITY_CANDIDATES_THRESHOLD ) - break; - } - } - if ( scoreToConstraint.size() > 0 ) - { - ENGINE_LOG( Stringf( "Score of the picked ReLU: %f", ( *scoreToConstraint.begin() ).first ) - .ascii() ); - return ( *scoreToConstraint.begin() ).second; - } - else - return NULL; -} - -PiecewiseLinearConstraint *Engine::pickSplitPLConstraintBasedOnTopology() -{ - // We push the first unfixed ReLU in the topology order to the _candidatePlConstraints - ENGINE_LOG( Stringf( "Using EarliestReLU heuristics..." 
).ascii() ); - - if ( !_networkLevelReasoner ) - throw MarabouError( MarabouError::NETWORK_LEVEL_REASONER_NOT_AVAILABLE ); - - List constraints = - _networkLevelReasoner->getConstraintsInTopologicalOrder(); - - for ( auto &plConstraint : constraints ) - { - if ( plConstraint->isActive() && !plConstraint->phaseFixed() ) - return plConstraint; - } - return NULL; -} - -PiecewiseLinearConstraint *Engine::pickSplitPLConstraintBasedOnIntervalWidth() -{ - // We push the first unfixed ReLU in the topology order to the _candidatePlConstraints - ENGINE_LOG( Stringf( "Using LargestInterval heuristics..." ).ascii() ); - - unsigned inputVariableWithLargestInterval = 0; - double largestIntervalSoFar = 0; - for ( const auto &variable : _preprocessedQuery->getInputVariables() ) - { - double interval = _tableau->getUpperBound( variable ) - _tableau->getLowerBound( variable ); - if ( interval > largestIntervalSoFar ) - { - inputVariableWithLargestInterval = variable; - largestIntervalSoFar = interval; - } - } - - if ( largestIntervalSoFar == 0 ) - return NULL; - else - { - double mid = ( _tableau->getLowerBound( inputVariableWithLargestInterval ) + - _tableau->getUpperBound( inputVariableWithLargestInterval ) ) / - 2; - PiecewiseLinearCaseSplit s1; - s1.storeBoundTightening( - Tightening( inputVariableWithLargestInterval, mid, Tightening::UB ) ); - PiecewiseLinearCaseSplit s2; - s2.storeBoundTightening( - Tightening( inputVariableWithLargestInterval, mid, Tightening::LB ) ); - - List splits; - splits.append( s1 ); - splits.append( s2 ); - _disjunctionForSplitting = - std::unique_ptr( new DisjunctionConstraint( splits ) ); - return _disjunctionForSplitting.get(); - } -} - -PiecewiseLinearConstraint *Engine::pickSplitPLConstraint( DivideStrategy strategy ) -{ - ENGINE_LOG( Stringf( "Picking a split PLConstraint..." 
).ascii() ); - - PiecewiseLinearConstraint *candidatePLConstraint = NULL; - if ( strategy == DivideStrategy::PseudoImpact ) - { - if ( _smtCore.getStackDepth() > 3 ) - candidatePLConstraint = _smtCore.getConstraintsWithHighestScore(); - else if ( !_preprocessedQuery->getInputVariables().empty() && - _preprocessedQuery->getInputVariables().size() < - GlobalConfiguration::INTERVAL_SPLITTING_THRESHOLD ) - candidatePLConstraint = pickSplitPLConstraintBasedOnIntervalWidth(); - else - { - candidatePLConstraint = pickSplitPLConstraintBasedOnPolarity(); - if ( candidatePLConstraint == NULL ) - candidatePLConstraint = _smtCore.getConstraintsWithHighestScore(); - } - } - else if ( strategy == DivideStrategy::Polarity ) - candidatePLConstraint = pickSplitPLConstraintBasedOnPolarity(); - else if ( strategy == DivideStrategy::EarliestReLU ) - candidatePLConstraint = pickSplitPLConstraintBasedOnTopology(); - else if ( strategy == DivideStrategy::LargestInterval && - ( ( _smtCore.getStackDepth() + 1 ) % - GlobalConfiguration::INTERVAL_SPLITTING_FREQUENCY != - 0 ) ) - { - // Conduct interval splitting periodically. - candidatePLConstraint = pickSplitPLConstraintBasedOnIntervalWidth(); - } - ENGINE_LOG( - Stringf( ( candidatePLConstraint ? "Picked..." - : "Unable to pick using the current strategy..." ) ) - .ascii() ); - return candidatePLConstraint; -} - -PiecewiseLinearConstraint *Engine::pickSplitPLConstraintSnC( SnCDivideStrategy strategy ) -{ - PiecewiseLinearConstraint *candidatePLConstraint = NULL; - if ( strategy == SnCDivideStrategy::Polarity ) - candidatePLConstraint = pickSplitPLConstraintBasedOnPolarity(); - else if ( strategy == SnCDivideStrategy::EarliestReLU ) - candidatePLConstraint = pickSplitPLConstraintBasedOnTopology(); - - ENGINE_LOG( Stringf( "Done updating scores..." ).ascii() ); - ENGINE_LOG( - Stringf( ( candidatePLConstraint ? "Picked..." - : "Unable to pick using the current strategy..." 
) ) - .ascii() ); - return candidatePLConstraint; -} - -bool Engine::restoreSmtState( SmtState &smtState ) -{ - try - { - ASSERT( _smtCore.getStackDepth() == 0 ); - - // Step 1: all implied valid splits at root - for ( auto &validSplit : smtState._impliedValidSplitsAtRoot ) - { - applySplit( validSplit ); - _smtCore.recordImpliedValidSplit( validSplit ); - } - - tightenBoundsOnConstraintMatrix(); - _boundManager.propagateTightenings(); - // For debugging purposes - checkBoundCompliancyWithDebugSolution(); - do - performSymbolicBoundTightening(); - while ( applyAllValidConstraintCaseSplits() ); - - // Step 2: replay the stack - for ( auto &stackEntry : smtState._stack ) - { - _smtCore.replaySmtStackEntry( stackEntry ); - // Do all the bound propagation, and set ReLU constraints to inactive (at - // least the one corresponding to the _activeSplit applied above. - tightenBoundsOnConstraintMatrix(); - - // For debugging purposes - checkBoundCompliancyWithDebugSolution(); - do - performSymbolicBoundTightening(); - while ( applyAllValidConstraintCaseSplits() ); - } - _boundManager.propagateTightenings(); - } - catch ( const InfeasibleQueryException & ) - { - // The current query is unsat, and we need to pop. - // If we're at level 0, the whole query is unsat. 
- if ( _produceUNSATProofs ) - explainSimplexFailure(); - - if ( !_smtCore.popSplit() ) - { - if ( _verbosity > 0 ) - { - printf( "\nEngine::solve: UNSAT query\n" ); - _statistics.print(); - } - _exitCode = Engine::UNSAT; - for ( PiecewiseLinearConstraint *p : _plConstraints ) - p->setActiveConstraint( true ); - return false; - } - } - return true; -} - -void Engine::storeSmtState( SmtState &smtState ) -{ - _smtCore.storeSmtState( smtState ); -} - -bool Engine::solveWithMILPEncoding( double timeoutInSeconds ) -{ - try - { - if ( _lpSolverType == LPSolverType::NATIVE && _tableau->basisMatrixAvailable() ) - { - explicitBasisBoundTightening(); - applyAllBoundTightenings(); - applyAllValidConstraintCaseSplits(); - } - - while ( applyAllValidConstraintCaseSplits() ) - { - performSymbolicBoundTightening(); - } - } - catch ( const InfeasibleQueryException & ) - { - _exitCode = Engine::UNSAT; - return false; - } - - ENGINE_LOG( "Encoding the input query with Gurobi...\n" ); - _gurobi = std::unique_ptr( new GurobiWrapper() ); - _tableau->setGurobi( &( *_gurobi ) ); - _milpEncoder = std::unique_ptr( new MILPEncoder( *_tableau ) ); - _milpEncoder->encodeQuery( *_gurobi, *_preprocessedQuery ); - ENGINE_LOG( "Query encoded in Gurobi...\n" ); - - double timeoutForGurobi = ( timeoutInSeconds == 0 ? 
FloatUtils::infinity() : timeoutInSeconds ); - ENGINE_LOG( Stringf( "Gurobi timeout set to %f\n", timeoutForGurobi ).ascii() ) - _gurobi->setTimeLimit( timeoutForGurobi ); - if ( !_sncMode ) - _gurobi->setNumberOfThreads( Options::get()->getInt( Options::NUM_WORKERS ) ); - _gurobi->setVerbosity( _verbosity > 0 ); - _gurobi->solve(); - - if ( _gurobi->haveFeasibleSolution() ) - { - if ( allNonlinearConstraintsHold() ) - { - _exitCode = IEngine::SAT; - return true; - } - else - { - _exitCode = IEngine::UNKNOWN; - return false; - } - } - else if ( _gurobi->infeasible() ) - _exitCode = IEngine::UNSAT; - else if ( _gurobi->timeout() ) - _exitCode = IEngine::TIMEOUT; - else - throw NLRError( NLRError::UNEXPECTED_RETURN_STATUS_FROM_GUROBI ); - return false; -} - -bool Engine::preprocessingEnabled() const -{ - return _preprocessingEnabled; -} - -Preprocessor *Engine::getPreprocessor() -{ - return &_preprocessor; -} - -bool Engine::performDeepSoILocalSearch() -{ - ENGINE_LOG( "Performing local search..." ); - struct timespec start = TimeUtils::sampleMicro(); - ASSERT( allVarsWithinBounds() ); - - // All the linear constraints have been satisfied at this point. - // Update the cost function - _soiManager->initializePhasePattern(); - - LinearExpression initialPhasePattern = _soiManager->getCurrentSoIPhasePattern(); - - if ( initialPhasePattern.isZero() ) - { - if ( hasBranchingCandidate() ) - while ( !_smtCore.needToSplit() ) - _smtCore.reportRejectedPhasePatternProposal(); - return false; - } - - minimizeHeuristicCost( initialPhasePattern ); - ASSERT( allVarsWithinBounds() ); - _soiManager->updateCurrentPhasePatternForSatisfiedPLConstraints(); - // Always accept the first phase pattern. 
- _soiManager->acceptCurrentPhasePattern(); - double costOfLastAcceptedPhasePattern = - computeHeuristicCost( _soiManager->getCurrentSoIPhasePattern() ); - - double costOfProposedPhasePattern = FloatUtils::infinity(); - bool lastProposalAccepted = true; - while ( !_smtCore.needToSplit() ) - { - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TOTAL_TIME_LOCAL_SEARCH_MICRO, - TimeUtils::timePassed( start, end ) ); - start = end; - - if ( lastProposalAccepted ) - { - /* - Check whether the optimal solution to the last accepted phase - is a real solution. We only check this when the last proposal - was accepted, because rejected phase pattern must have resulted in - increase in the SoI cost. - - HW: Another option is to only do this check when - costOfLastAcceptedPhasePattern is 0, but this might be too strict. - The overhead is low anyway. - */ - collectViolatedPlConstraints(); - if ( allPlConstraintsHold() ) - { - if ( _lpSolverType == LPSolverType::NATIVE && - _tableau->getBasicAssignmentStatus() != - ITableau::BASIC_ASSIGNMENT_JUST_COMPUTED ) - { - if ( _verbosity > 0 ) - { - printf( "Before declaring sat, recomputing...\n" ); - } - // Make sure that the assignment is precise before declaring success - _tableau->computeAssignment(); - // If we actually have a real satisfying assignment, - return false; - } - else - { - ENGINE_LOG( "Performing local search - done." ); - return true; - } - } - else if ( FloatUtils::isZero( costOfLastAcceptedPhasePattern ) ) - { - // Corner case: the SoI is minimal but there are still some PL - // constraints (those not in the SoI) unsatisfied. - // In this case, we bump up the score of PLConstraints not in - // the SoI with the hope to branch on them early. 
- bumpUpPseudoImpactOfPLConstraintsNotInSoI(); - if ( hasBranchingCandidate() ) - while ( !_smtCore.needToSplit() ) - _smtCore.reportRejectedPhasePatternProposal(); - return false; - } - } - - // No satisfying assignment found for the last accepted phase pattern, - // propose an update to it. - _soiManager->proposePhasePatternUpdate(); - minimizeHeuristicCost( _soiManager->getCurrentSoIPhasePattern() ); - _soiManager->updateCurrentPhasePatternForSatisfiedPLConstraints(); - costOfProposedPhasePattern = - computeHeuristicCost( _soiManager->getCurrentSoIPhasePattern() ); - - // We have the "local" effect of change the cost term of some - // PLConstraints in the phase pattern. Use this information to influence - // the branching decision. - updatePseudoImpactWithSoICosts( costOfLastAcceptedPhasePattern, - costOfProposedPhasePattern ); - - // Decide whether to accept the last proposal. - if ( _soiManager->decideToAcceptCurrentProposal( costOfLastAcceptedPhasePattern, - costOfProposedPhasePattern ) ) - { - _soiManager->acceptCurrentPhasePattern(); - costOfLastAcceptedPhasePattern = costOfProposedPhasePattern; - lastProposalAccepted = true; - } - else - { - _smtCore.reportRejectedPhasePatternProposal(); - lastProposalAccepted = false; - } - } - - ENGINE_LOG( "Performing local search - done" ); - return false; -} - -void Engine::minimizeHeuristicCost( const LinearExpression &heuristicCost ) -{ - ENGINE_LOG( "Optimizing w.r.t. the current heuristic cost..." 
); - - if ( _lpSolverType == LPSolverType::GUROBI ) - { - minimizeCostWithGurobi( heuristicCost ); - - ENGINE_LOG( - Stringf( "Current heuristic cost: %f", _gurobi->getOptimalCostOrObjective() ).ascii() ); - } - else - { - _tableau->toggleOptimization( true ); - - _heuristicCost = heuristicCost; - - bool localOptimumReached = false; - while ( !localOptimumReached ) - { - DEBUG( _tableau->verifyInvariants() ); - - mainLoopStatistics(); - if ( _verbosity > 1 && - _statistics.getLongAttribute( Statistics::NUM_MAIN_LOOP_ITERATIONS ) % - _statisticsPrintingFrequency == - 0 ) - _statistics.print(); - - if ( !allVarsWithinBounds() ) - throw VariableOutOfBoundDuringOptimizationException(); - - if ( performPrecisionRestorationIfNeeded() ) - continue; - - ASSERT( allVarsWithinBounds() ); - - localOptimumReached = performSimplexStep(); - } - _tableau->toggleOptimization( false ); - ENGINE_LOG( Stringf( "Current heuristic cost: %f", computeHeuristicCost( heuristicCost ) ) - .ascii() ); - } - - ENGINE_LOG( "Optimizing w.r.t. the current heuristic cost - done\n" ); -} - -double Engine::computeHeuristicCost( const LinearExpression &heuristicCost ) -{ - return ( _costFunctionManager->computeGivenCostFunctionDirectly( heuristicCost._addends ) + - heuristicCost._constant ); -} - -void Engine::updatePseudoImpactWithSoICosts( double costOfLastAcceptedPhasePattern, - double costOfProposedPhasePattern ) -{ - ASSERT( _soiManager ); - - const List &constraintsUpdated = - _soiManager->getConstraintsUpdatedInLastProposal(); - // Score is divided by the number of updated constraints in the last - // proposal. In the Sum of Infeasibilities paper, only one constraint - // is updated each time. But we might consider alternative proposal - // strategy in the future. - double score = ( fabs( costOfLastAcceptedPhasePattern - costOfProposedPhasePattern ) / - constraintsUpdated.size() ); - - ASSERT( constraintsUpdated.size() > 0 ); - // Update the Pseudo-Impact estimation. 
- for ( const auto &constraint : constraintsUpdated ) - _smtCore.updatePLConstraintScore( constraint, score ); -} - -void Engine::bumpUpPseudoImpactOfPLConstraintsNotInSoI() -{ - ASSERT( _soiManager ); - for ( const auto &plConstraint : _plConstraints ) - { - if ( plConstraint->isActive() && !plConstraint->supportSoI() && - !plConstraint->phaseFixed() && !plConstraint->satisfied() ) - _smtCore.updatePLConstraintScore( - plConstraint, GlobalConfiguration::SCORE_BUMP_FOR_PL_CONSTRAINTS_NOT_IN_SOI ); - } -} - -void Engine::informLPSolverOfBounds() -{ - if ( _lpSolverType == LPSolverType::GUROBI ) - { - struct timespec start = TimeUtils::sampleMicro(); - for ( unsigned i = 0; i < _preprocessedQuery->getNumberOfVariables(); ++i ) - { - String variableName = _milpEncoder->getVariableNameFromVariable( i ); - _gurobi->setLowerBound( variableName, _tableau->getLowerBound( i ) ); - _gurobi->setUpperBound( variableName, _tableau->getUpperBound( i ) ); - } - _gurobi->updateModel(); - struct timespec end = TimeUtils::sampleMicro(); - _statistics.incLongAttribute( Statistics::TIME_ADDING_CONSTRAINTS_TO_MILP_SOLVER_MICRO, - TimeUtils::timePassed( start, end ) ); - } - else - { - // Bounds are already up-to-date in Tableau when using native Simplex. 
- return; - } -} - -bool Engine::minimizeCostWithGurobi( const LinearExpression &costFunction ) -{ - ASSERT( _gurobi && _milpEncoder ); - - struct timespec simplexStart = TimeUtils::sampleMicro(); - - _milpEncoder->encodeCostFunction( *_gurobi, costFunction ); - _gurobi->setTimeLimit( FloatUtils::infinity() ); - _gurobi->solve(); - - struct timespec simplexEnd = TimeUtils::sampleMicro(); - - _statistics.incLongAttribute( Statistics::TIME_SIMPLEX_STEPS_MICRO, - TimeUtils::timePassed( simplexStart, simplexEnd ) ); - _statistics.incLongAttribute( Statistics::NUM_SIMPLEX_STEPS, - _gurobi->getNumberOfSimplexIterations() ); - - if ( _gurobi->infeasible() ) - throw InfeasibleQueryException(); - else if ( _gurobi->optimal() ) - return true; - else - throw CommonError( CommonError::UNEXPECTED_GUROBI_STATUS, - Stringf( "Current status: %u", _gurobi->getStatusCode() ).ascii() ); - - return false; -} - -void Engine::checkGurobiBoundConsistency() const -{ - if ( _gurobi && _milpEncoder ) - { - for ( unsigned i = 0; i < _preprocessedQuery->getNumberOfVariables(); ++i ) - { - String iName = _milpEncoder->getVariableNameFromVariable( i ); - double gurobiLowerBound = _gurobi->getLowerBound( iName ); - double lowerBound = _tableau->getLowerBound( i ); - if ( !FloatUtils::areEqual( gurobiLowerBound, lowerBound ) ) - { - throw MarabouError( MarabouError::BOUNDS_NOT_UP_TO_DATE_IN_LP_SOLVER, - Stringf( "x%u lower bound inconsistent!" - " Gurobi: %f, Tableau: %f", - i, - gurobiLowerBound, - lowerBound ) - .ascii() ); - } - double gurobiUpperBound = _gurobi->getUpperBound( iName ); - double upperBound = _tableau->getUpperBound( i ); - - if ( !FloatUtils::areEqual( gurobiUpperBound, upperBound ) ) - { - throw MarabouError( MarabouError::BOUNDS_NOT_UP_TO_DATE_IN_LP_SOLVER, - Stringf( "x%u upper bound inconsistent!" 
- " Gurobi: %f, Tableau: %f", - i, - gurobiUpperBound, - upperBound ) - .ascii() ); - } - } - } -} - -bool Engine::consistentBounds() const -{ - return _boundManager.consistentBounds(); -} - -Query Engine::buildQueryFromCurrentState() const -{ - Query query = *_preprocessedQuery; - for ( unsigned i = 0; i < query.getNumberOfVariables(); ++i ) - { - query.setLowerBound( i, _tableau->getLowerBound( i ) ); - query.setUpperBound( i, _tableau->getUpperBound( i ) ); - } - return query; -} - -void Engine::updateGroundUpperBound( const unsigned var, const double value ) -{ - ASSERT( var < _tableau->getN() && _produceUNSATProofs ); - if ( FloatUtils::lt( value, _groundBoundManager.getUpperBound( var ) ) ) - _groundBoundManager.setUpperBound( var, value ); -} - -void Engine::updateGroundLowerBound( const unsigned var, const double value ) -{ - ASSERT( var < _tableau->getN() && _produceUNSATProofs ); - if ( FloatUtils::gt( value, _groundBoundManager.getLowerBound( var ) ) ) - _groundBoundManager.setLowerBound( var, value ); -} - -double Engine::getGroundBound( unsigned var, bool isUpper ) const -{ - ASSERT( var < _tableau->getN() && _produceUNSATProofs ); - return isUpper ? 
_groundBoundManager.getUpperBound( var ) - : _groundBoundManager.getLowerBound( var ); -} - -bool Engine::shouldProduceProofs() const -{ - return _produceUNSATProofs; -} - -void Engine::explainSimplexFailure() -{ - ASSERT( _produceUNSATProofs ); - - DEBUG( checkGroundBounds() ); - - unsigned infeasibleVar = _boundManager.getInconsistentVariable(); - - if ( infeasibleVar == IBoundManager::NO_VARIABLE_FOUND || - !certifyInfeasibility( infeasibleVar ) ) - infeasibleVar = explainFailureWithTableau(); - - if ( infeasibleVar == IBoundManager::NO_VARIABLE_FOUND ) - infeasibleVar = explainFailureWithCostFunction(); - - if ( infeasibleVar == IBoundManager::NO_VARIABLE_FOUND ) - { - _costFunctionManager->computeCoreCostFunction(); - infeasibleVar = explainFailureWithCostFunction(); - } - - if ( infeasibleVar == IBoundManager::NO_VARIABLE_FOUND ) - { - markLeafToDelegate(); - return; - } - - ASSERT( infeasibleVar < _tableau->getN() ); - ASSERT( _UNSATCertificateCurrentPointer && - !( **_UNSATCertificateCurrentPointer ).getContradiction() ); - _statistics.incUnsignedAttribute( Statistics::NUM_CERTIFIED_LEAVES ); - - writeContradictionToCertificate( infeasibleVar ); - - ( **_UNSATCertificateCurrentPointer ).makeLeaf(); -} - -bool Engine::certifyInfeasibility( unsigned var ) const -{ - ASSERT( _produceUNSATProofs ); - - Vector contradiction = computeContradiction( var ); - - if ( contradiction.empty() ) - return FloatUtils::isNegative( _groundBoundManager.getUpperBound( var ) - - _groundBoundManager.getLowerBound( var ) ); - - SparseUnsortedList sparseContradiction = SparseUnsortedList(); - - contradiction.empty() - ? 
sparseContradiction.initializeToEmpty() - : sparseContradiction.initialize( contradiction.data(), contradiction.size() ); - - // In case contradiction is a vector of zeros - if ( sparseContradiction.empty() ) - return FloatUtils::isNegative( _groundBoundManager.getUpperBound( var ) - - _groundBoundManager.getLowerBound( var ) ); - - double derivedBound = - UNSATCertificateUtils::computeCombinationUpperBound( sparseContradiction, - _tableau->getSparseA(), - _groundBoundManager.getUpperBounds(), - _groundBoundManager.getLowerBounds(), - _tableau->getN() ); - return FloatUtils::isNegative( derivedBound ); -} - -double Engine::explainBound( unsigned var, bool isUpper ) const -{ - ASSERT( _produceUNSATProofs ); - - SparseUnsortedList explanation( 0 ); - - if ( !_boundManager.isExplanationTrivial( var, isUpper ) ) - explanation = _boundManager.getExplanation( var, isUpper ); - - if ( explanation.empty() ) - return isUpper ? _groundBoundManager.getUpperBound( var ) - : _groundBoundManager.getLowerBound( var ); - - return UNSATCertificateUtils::computeBound( var, - isUpper, - explanation, - _tableau->getSparseA(), - _groundBoundManager.getUpperBounds(), - _groundBoundManager.getLowerBounds(), - _tableau->getN() ); -} - -bool Engine::validateBounds( unsigned var, double epsilon, bool isUpper ) const -{ - ASSERT( _produceUNSATProofs ); - - double explained, real; - explained = explainBound( var, isUpper ); - if ( isUpper ) - { - real = _boundManager.getUpperBound( var ); - if ( explained - real > epsilon ) - { - ENGINE_LOG( "Var %d. Computed Upper %.5lf, real %.5lf. Difference is %.10lf\n", - var, - explained, - real, - abs( explained - real ) ); - return false; - } - } - else - { - real = _boundManager.getLowerBound( var ); - if ( explained - real < -epsilon ) - { - ENGINE_LOG( "Var %d. Computed Lower %.5lf, real %.5lf. 
Difference is %.10lf\n", - var, - explained, - real, - abs( explained - real ) ); - return false; - } - } - return true; -} - -bool Engine::validateAllBounds( double epsilon ) const -{ - ASSERT( _produceUNSATProofs ); - - bool res = true; - - for ( unsigned var = 0; var < _tableau->getN(); ++var ) - if ( !validateBounds( var, epsilon, Tightening::UB ) || - !validateBounds( var, epsilon, Tightening::LB ) ) - res = false; - - return res; -} - -bool Engine::checkGroundBounds() const -{ - ASSERT( _produceUNSATProofs ); - - for ( unsigned i = 0; i < _tableau->getN(); ++i ) - { - if ( FloatUtils::gt( _groundBoundManager.getLowerBound( i ), - _boundManager.getLowerBound( i ) ) || - FloatUtils::lt( _groundBoundManager.getUpperBound( i ), - _boundManager.getUpperBound( i ) ) ) - return false; - } - return true; -} - -unsigned Engine::explainFailureWithTableau() -{ - ASSERT( _produceUNSATProofs ); - - // Failure of a simplex step implies infeasible bounds imposed by the row - TableauRow boundUpdateRow = TableauRow( _tableau->getN() ); - - // For every basic, check that is has no slack and its explanations indeed prove a - // contradiction - unsigned basicVar; - - for ( unsigned i = 0; i < _tableau->getM(); ++i ) - { - if ( _tableau->basicOutOfBounds( i ) ) - { - _tableau->getTableauRow( i, &boundUpdateRow ); - basicVar = boundUpdateRow._lhs; - - if ( FloatUtils::gt( _boundManager.computeRowBound( boundUpdateRow, Tightening::LB ), - _boundManager.getUpperBound( basicVar ) ) && - explainAndCheckContradiction( basicVar, Tightening::LB, &boundUpdateRow ) ) - return basicVar; - - if ( FloatUtils::lt( _boundManager.computeRowBound( boundUpdateRow, Tightening::UB ), - _boundManager.getLowerBound( basicVar ) ) && - explainAndCheckContradiction( basicVar, Tightening::UB, &boundUpdateRow ) ) - return basicVar; - } - } - - return IBoundManager::NO_VARIABLE_FOUND; -} - -unsigned Engine::explainFailureWithCostFunction() -{ - ASSERT( _produceUNSATProofs ); - - // Failure of a simplex step 
might imply infeasible bounds imposed by the cost function - unsigned curBasicVar; - unsigned infVar = IBoundManager::NO_VARIABLE_FOUND; - double curCost; - bool curUpper; - const SparseUnsortedList *costRow = _costFunctionManager->createRowOfCostFunction(); - - for ( unsigned i = 0; i < _tableau->getM(); ++i ) - { - curBasicVar = _tableau->basicIndexToVariable( i ); - curCost = _costFunctionManager->getBasicCost( i ); - - if ( FloatUtils::isZero( curCost ) ) - continue; - - curUpper = ( curCost < 0 ); - - // Check the basic variable has no slack - if ( !( !curUpper && FloatUtils::gt( _boundManager.computeSparseRowBound( - *costRow, Tightening::LB, curBasicVar ), - _boundManager.getUpperBound( curBasicVar ) ) ) && - !( curUpper && FloatUtils::lt( _boundManager.computeSparseRowBound( - *costRow, Tightening::UB, curBasicVar ), - _boundManager.getLowerBound( curBasicVar ) ) ) ) - - continue; - - // Check the explanation indeed proves a contradiction - if ( explainAndCheckContradiction( curBasicVar, curUpper, costRow ) ) - { - infVar = curBasicVar; - break; - } - } - - delete costRow; - return infVar; -} - -bool Engine::explainAndCheckContradiction( unsigned var, bool isUpper, const TableauRow *row ) -{ - ASSERT( _produceUNSATProofs ); - - SparseUnsortedList backup( 0 ); - backup = _boundManager.getExplanation( var, isUpper ); - - _boundManager.updateBoundExplanation( *row, isUpper, var ); - - // Ensure the proof is correct - if ( certifyInfeasibility( var ) ) - return true; - - // If not, restores previous certificate if the proof is wrong - _boundManager.setExplanation( backup, var, isUpper ); - - return false; -} - -bool Engine::explainAndCheckContradiction( unsigned var, - bool isUpper, - const SparseUnsortedList *row ) -{ - ASSERT( _produceUNSATProofs ); - - SparseUnsortedList backup( 0 ); - backup = _boundManager.getExplanation( var, isUpper ); - - _boundManager.updateBoundExplanationSparse( *row, isUpper, var ); - - // Ensure the proof is correct - if ( 
certifyInfeasibility( var ) ) - return true; - - // If not, restores previous certificate if the proof is wrong - _boundManager.setExplanation( backup, var, isUpper ); - - return false; -} - -UnsatCertificateNode *Engine::getUNSATCertificateCurrentPointer() const -{ - return _UNSATCertificateCurrentPointer->get(); -} - -void Engine::setUNSATCertificateCurrentPointer( UnsatCertificateNode *node ) -{ - _UNSATCertificateCurrentPointer->set( node ); -} - -const UnsatCertificateNode *Engine::getUNSATCertificateRoot() const -{ - return _UNSATCertificate; -} - -bool Engine::certifyUNSATCertificate() -{ - ASSERT( _produceUNSATProofs && _UNSATCertificate && !_smtCore.getStackDepth() ); - - for ( auto &constraint : _plConstraints ) - { - if ( !UNSATCertificateUtils::getSupportedActivations().exists( constraint->getType() ) ) - { - String activationType = constraint->serializeToString().tokenize( "," ).back(); - printf( "Certification Error! Network contains activation function %s, that is not yet " - "supported by Marabou certification.\n", - activationType.ascii() ); - return false; - } - } - - struct timespec certificationStart = TimeUtils::sampleMicro(); - _precisionRestorer.restoreInitialEngineState( *this ); - - Vector groundUpperBounds( _tableau->getN(), 0 ); - Vector groundLowerBounds( _tableau->getN(), 0 ); - - for ( unsigned i = 0; i < _tableau->getN(); ++i ) - { - groundUpperBounds[i] = _groundBoundManager.getUpperBound( i ); - groundLowerBounds[i] = _groundBoundManager.getLowerBound( i ); - } - - if ( GlobalConfiguration::WRITE_JSON_PROOF ) - { - File file( JsonWriter::PROOF_FILENAME ); - JsonWriter::writeProofToJson( _UNSATCertificate, - _tableau->getM(), - _tableau->getSparseA(), - groundUpperBounds, - groundLowerBounds, - _plConstraints, - file ); - } - - Checker unsatCertificateChecker( _UNSATCertificate, - _tableau->getM(), - _tableau->getSparseA(), - groundUpperBounds, - groundLowerBounds, - _plConstraints ); - bool certificationSucceeded = 
unsatCertificateChecker.check(); - - _statistics.setLongAttribute( - Statistics::TOTAL_CERTIFICATION_TIME, - TimeUtils::timePassed( certificationStart, TimeUtils::sampleMicro() ) ); - printf( "Certification time: " ); - _statistics.printLongAttributeAsTime( - _statistics.getLongAttribute( Statistics::TOTAL_CERTIFICATION_TIME ) ); - - if ( certificationSucceeded ) - { - printf( "Certified\n" ); - _statistics.incUnsignedAttribute( Statistics::CERTIFIED_UNSAT ); - if ( _statistics.getUnsignedAttribute( Statistics::NUM_DELEGATED_LEAVES ) ) - printf( "Some leaves were delegated and need to be certified separately by an SMT " - "solver\n" ); - } - else - printf( "Error certifying UNSAT certificate\n" ); - - DEBUG( { - ASSERT( certificationSucceeded ); - if ( _statistics.getUnsignedAttribute( Statistics::NUM_POPS ) ) - { - double delegationRatio = - _statistics.getUnsignedAttribute( Statistics::NUM_DELEGATED_LEAVES ) / - _statistics.getUnsignedAttribute( Statistics::NUM_CERTIFIED_LEAVES ); - ASSERT( FloatUtils::lt( delegationRatio, 0.01 ) ); - } - } ); - - return certificationSucceeded; -} - -void Engine::markLeafToDelegate() -{ - ASSERT( _produceUNSATProofs ); - - // Mark leaf with toDelegate Flag - UnsatCertificateNode *currentUnsatCertificateNode = _UNSATCertificateCurrentPointer->get(); - ASSERT( _UNSATCertificateCurrentPointer && !currentUnsatCertificateNode->getContradiction() ); - currentUnsatCertificateNode->setDelegationStatus( DelegationStatus::DELEGATE_SAVE ); - currentUnsatCertificateNode->deletePLCExplanations(); - _statistics.incUnsignedAttribute( Statistics::NUM_DELEGATED_LEAVES ); - - if ( !currentUnsatCertificateNode->getChildren().empty() ) - currentUnsatCertificateNode->makeLeaf(); -} - -const Vector Engine::computeContradiction( unsigned infeasibleVar ) const -{ - ASSERT( _produceUNSATProofs ); - - unsigned m = _tableau->getM(); - SparseUnsortedList upperBoundExplanation( 0 ); - SparseUnsortedList lowerBoundExplanation( 0 ); - - if ( 
!_boundManager.isExplanationTrivial( infeasibleVar, Tightening::UB ) ) - upperBoundExplanation = _boundManager.getExplanation( infeasibleVar, Tightening::UB ); - - if ( !_boundManager.isExplanationTrivial( infeasibleVar, Tightening::LB ) ) - lowerBoundExplanation = _boundManager.getExplanation( infeasibleVar, Tightening::LB ); - - if ( upperBoundExplanation.empty() && lowerBoundExplanation.empty() ) - return Vector( 0 ); - - Vector contradiction = Vector( m, 0 ); - - if ( !upperBoundExplanation.empty() ) - for ( const auto &entry : upperBoundExplanation ) - contradiction[entry._index] = entry._value; - - if ( !lowerBoundExplanation.empty() ) - for ( const auto &entry : lowerBoundExplanation ) - contradiction[entry._index] -= entry._value; - - return contradiction; -} - -void Engine::writeContradictionToCertificate( unsigned infeasibleVar ) const -{ - ASSERT( _produceUNSATProofs ); - - Vector leafContradictionVec = computeContradiction( infeasibleVar ); - - Contradiction *leafContradiction = leafContradictionVec.empty() - ? new Contradiction( infeasibleVar ) - : new Contradiction( leafContradictionVec ); - ( **_UNSATCertificateCurrentPointer ).setContradiction( leafContradiction ); -} - -const BoundExplainer *Engine::getBoundExplainer() const -{ - return _boundManager.getBoundExplainer(); -} - -void Engine::setBoundExplainerContent( BoundExplainer *boundExplainer ) -{ - _boundManager.copyBoundExplainerContent( boundExplainer ); -} - -void Engine::propagateBoundManagerTightenings() -{ - _boundManager.propagateTightenings(); -} - -void Engine::extractBounds( IQuery &inputQuery ) -{ - for ( unsigned i = 0; i < inputQuery.getNumberOfVariables(); ++i ) - { - if ( _preprocessingEnabled ) - { - // Has the variable been merged into another? 
- unsigned variable = i; - while ( _preprocessor.variableIsMerged( variable ) ) - variable = _preprocessor.getMergedIndex( variable ); - - // Symbolically fixed variables are ignored - if ( _preprocessor.variableIsUnusedAndSymbolicallyFixed( i ) ) - { - inputQuery.tightenLowerBound( i, FloatUtils::negativeInfinity() ); - inputQuery.tightenUpperBound( i, FloatUtils::infinity() ); - continue; - } - - // Fixed variables are easy: return the value they've been fixed to. - if ( _preprocessor.variableIsFixed( variable ) ) - { - inputQuery.tightenLowerBound( i, _preprocessor.getFixedValue( variable ) ); - inputQuery.tightenUpperBound( i, _preprocessor.getFixedValue( variable ) ); - continue; - } - - // We know which variable to look for, but it may have been assigned - // a new index, due to variable elimination - variable = _preprocessor.getNewIndex( variable ); - - inputQuery.tightenLowerBound( i, _preprocessedQuery->getLowerBound( variable ) ); - inputQuery.tightenUpperBound( i, _preprocessedQuery->getUpperBound( variable ) ); - } - else - { - inputQuery.tightenLowerBound( i, _preprocessedQuery->getLowerBound( i ) ); - inputQuery.tightenUpperBound( i, _preprocessedQuery->getUpperBound( i ) ); - } - } -} - -void Engine::addPLCLemma( std::shared_ptr &explanation ) -{ - if ( !_produceUNSATProofs ) - return; - - ASSERT( explanation && _UNSATCertificate && _UNSATCertificateCurrentPointer ) - _statistics.incUnsignedAttribute( Statistics::NUM_LEMMAS ); - _UNSATCertificateCurrentPointer->get()->addPLCLemma( explanation ); -} \ No newline at end of file From 0d51b8a2ce21d16f61ee88bc4c3d644fdda2765b Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 12 Dec 2024 20:58:53 +0200 Subject: [PATCH 34/72] Add files via upload --- src/engine/Engine.cpp | 8 +++++--- src/engine/MILPSolverBoundTighteningType.h | 3 ++- src/engine/PolygonalTightening.h | 23 ++++++++++++---------- 3 files changed, 20 insertions(+), 14 deletions(-) diff --git a/src/engine/Engine.cpp b/src/engine/Engine.cpp 
index 645b691ca3..e137c3cbb9 100644 --- a/src/engine/Engine.cpp +++ b/src/engine/Engine.cpp @@ -1579,9 +1579,11 @@ void Engine::performMILPSolverBoundedTightening( Query *inputQuery ) // TODO: Remove this block after getting ready to support sigmoid with MILP Bound // Tightening. - if ( ( _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::MILP_ENCODING || - _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::MILP_ENCODING_INCREMENTAL || - _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION ) && + if ( ( _milpSolverBoundTighteningType == MILPSolverBoundTighteningType::MILP_ENCODING || + _milpSolverBoundTighteningType == + MILPSolverBoundTighteningType::MILP_ENCODING_INCREMENTAL || + _milpSolverBoundTighteningType == + MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION ) && _preprocessedQuery->getNonlinearConstraints().size() > 0 ) throw MarabouError( MarabouError::FEATURE_NOT_YET_SUPPORTED, "Marabou doesn't support sigmoid with MILP Bound Tightening" ); diff --git a/src/engine/MILPSolverBoundTighteningType.h b/src/engine/MILPSolverBoundTighteningType.h index ad839706f7..760f82e8d1 100644 --- a/src/engine/MILPSolverBoundTighteningType.h +++ b/src/engine/MILPSolverBoundTighteningType.h @@ -36,7 +36,8 @@ enum class MILPSolverBoundTighteningType { BACKWARD_ANALYSIS_CONVERGE = 6, // Perform backward analysis using the INVPROP Algorithm (arXiv:2302.01404v4 [cs.LG]) BACKWARD_ANALYSIS_INVPROP = 7, - // Perform backward analysis using the PreimageApproximation Algorithm (arXiv:2305.03686v4 [cs.SE]) + // Perform backward analysis using the PreimageApproximation Algorithm (arXiv:2305.03686v4 + // [cs.SE]) BACKWARD_ANALYSIS_PREIMAGE_APPROX = 8, // Option to have no MILP bound tightening performed NONE = 10, diff --git a/src/engine/PolygonalTightening.h b/src/engine/PolygonalTightening.h index 538ed9cee0..0f811f6fe6 100644 --- a/src/engine/PolygonalTightening.h +++ b/src/engine/PolygonalTightening.h @@ -16,9 +16,10 
@@ #ifndef __PolygonalTightening_h__ #define __PolygonalTightening_h__ -#include -#include "NeuronIndex.h" #include "Map.h" +#include "NeuronIndex.h" + +#include class PolygonalTightening { @@ -26,9 +27,11 @@ class PolygonalTightening enum PolygonalBoundType { LB = 0, UB = 1, - }; + }; - PolygonalTightening( Map neuronToCoefficient, double value, PolygonalBoundType type ) + PolygonalTightening( Map neuronToCoefficient, + double value, + PolygonalBoundType type ) : _neuronToCoefficient( neuronToCoefficient ) , _value( value ) , _type( type ) @@ -62,20 +65,20 @@ class PolygonalTightening bool currentFound = false; for ( const auto &otherPair : other._neuronToCoefficient ) { - currentFound |= ( pair.first._layer == otherPair.first._layer - && pair.first._neuron == otherPair.first._neuron - && pair.second == otherPair.second ); + currentFound |= ( pair.first._layer == otherPair.first._layer && + pair.first._neuron == otherPair.first._neuron && + pair.second == otherPair.second ); } allFound &= currentFound; } bool result = allFound && _value == other._value && _type == other._type; - return result; + return result; } void dump() const { printf( "PolygonalTightening: x %s %.2lf\n", _type == LB ? 
">=" : "<=", _value ); - + for ( const auto &pair : _neuronToCoefficient ) { printf( "NeuronIndex: (layer %u, neuron %u), Coefficient: %.2lf )\n", @@ -94,4 +97,4 @@ class PolygonalTightening // tags-file-name: "../../TAGS" // c-basic-offset: 4 // End: -//s +// s From 32474bce5dfd329856d9b5c4133771ecb1599a7a Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 12 Dec 2024 21:00:34 +0200 Subject: [PATCH 35/72] Add files via upload --- src/nlr/tests/Test_NetworkLevelReasoner.h | 2937 +++++++++++---------- 1 file changed, 1471 insertions(+), 1466 deletions(-) diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index 0c16ae1601..977faa4ed6 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -14,10 +14,10 @@ **/ #include "../../engine/tests/MockTableau.h" // TODO: fix this +#include "DeepPolySoftmaxElement.h" #include "FloatUtils.h" #include "Layer.h" #include "NetworkLevelReasoner.h" -#include "DeepPolySoftmaxElement.h" #include "Options.h" #include "Query.h" #include "Tightening.h" @@ -188,7 +188,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); } - + void populateNetworkWithAbs( NLR::NetworkLevelReasoner &nlr ) { /* @@ -260,7 +260,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); } - + void populateNetworkWithSign( NLR::NetworkLevelReasoner &nlr ) { /* @@ -332,7 +332,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); } - + void populateNetworkWithRound( NLR::NetworkLevelReasoner &nlr ) { /* @@ -404,7 +404,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 
nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); } - + void populateNetworkWithLeakyRelu( NLR::NetworkLevelReasoner &nlr ) { /* @@ -422,7 +422,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - + nlr.getLayer( 2 )->setAlpha( 0.1 ); nlr.getLayer( 4 )->setAlpha( 0.1 ); @@ -479,16 +479,15 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); } - - + void populateNetworkWithMax( NLR::NetworkLevelReasoner &nlr ) { /* a - x b e + x b e g - y c f + y c f d */ @@ -550,7 +549,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); } - + void populateNetworkWithSoftmax( NLR::NetworkLevelReasoner &nlr ) { /* @@ -630,14 +629,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); } - + void populateNetworkWithBilinear( NLR::NetworkLevelReasoner &nlr ) { /* a - x b e + x b e g - y c f + y c f d */ @@ -699,7 +698,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); } - + void populateNetworkWithAbsAndRelu( NLR::NetworkLevelReasoner &nlr ) { /* @@ -771,7 +770,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); } - + void populateNetworkWithRoundAndSign( NLR::NetworkLevelReasoner &nlr ) { /* @@ -843,7 +842,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); nlr.setNeuronVariable( 
NLR::NeuronIndex( 5, 1 ), 13 ); } - + void populateNetworkWithLeakyReluAndSigmoid( NLR::NetworkLevelReasoner &nlr ) { /* @@ -861,7 +860,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - + nlr.getLayer( 2 )->setAlpha( 0.1 ); // Mark layer dependencies @@ -917,7 +916,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 12 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 13 ); } - + void populateNetworkWithSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr ) { /* @@ -991,7 +990,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); } - + void populateNetworkWithReluAndBilinear( NLR::NetworkLevelReasoner &nlr ) { /* @@ -1059,7 +1058,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); } - + void populateNetworkSBTRelu( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -1124,7 +1123,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); } - + void populateNetworkSBTReluResidual1( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -1192,7 +1191,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 5, -large ); tableau.setUpperBound( 5, large ); } - + void populateNetworkSBTReluResidual2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -1264,7 +1263,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); } - + void populateNetworkSBTReluReindex( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -1365,7 +1364,7 @@ class NetworkLevelReasonerTestSuite : public 
CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); } - + void populateNetworkSBTLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -1470,7 +1469,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); } - + void populateNetworkSBTSigmoidsAndRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -1553,7 +1552,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 9, -large ); tableau.setUpperBound( 9, large ); } - + void populateNetworkSBTMax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -1623,7 +1622,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 7, -large ); tableau.setUpperBound( 7, large ); } - + void populateNetworkSBTSoftmax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -1725,7 +1724,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 10, -large ); tableau.setUpperBound( 10, large ); } - + void populateNetworkSBTSoftmax2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -1870,7 +1869,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 16, -large ); tableau.setUpperBound( 16, large ); } - + void populateNetworkSBTBilinear( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -1932,7 +1931,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 5, -large ); tableau.setUpperBound( 5, large ); } - + void populateNetworkBackwardReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -1960,7 +1959,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); nlr.addLayer( 4, NLR::Layer::RELU, 2 ); nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - + nlr.getLayer( 2 )->setAlpha( 0.1 ); 
nlr.getLayer( 4 )->setAlpha( 0.1 ); @@ -2037,7 +2036,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); } - + void populateNetworkBackwardReLU2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -2048,31 +2047,31 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x5 x9 x18 x20 x2 x13 x16 x6 x10 - + x3 = -x0 + x1 x4 = x0 + 2*x1 x5 = x0 + x1 + x2 x6 = 3*x0 - 2*x1 - x2 - + x7 = ReLU( x3 ) x8 = ReLU( x4 ) x9 = ReLU( x5 ) x10 = ReLU( x6 ) - + x11 = 2x7 + x9 - x10 + 2 x12 = -x7 + 2x8 + x10 x13 = x7 - x8 + 2x9 - 2 - + x14 = ReLU( x11 ) x15 = ReLU( x12 ) x16 = ReLU( x13 ) - + x17 = -x14 + x15 - x16 x18 = x14 + x15 + x16 - + x19 = ReLU( x17 ) x20 = ReLU( x18 ) - + x21 = x19 - x20 - 1 */ @@ -2118,7 +2117,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setWeight( 4, 1, 5, 1, 1 ); nlr.setWeight( 4, 2, 5, 0, -1 ); nlr.setWeight( 4, 2, 5, 1, 1 ); - + nlr.setWeight( 6, 0, 7, 0, 1 ); nlr.setWeight( 6, 1, 7, 0, -1 ); @@ -2135,7 +2134,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); nlr.addActivationSource( 3, 2, 4, 2 ); - + nlr.addActivationSource( 5, 0, 6, 0 ); nlr.addActivationSource( 5, 1, 6, 1 ); @@ -2164,10 +2163,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); // Very loose bounds for neurons except inputs @@ -2213,7 +2212,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 21, -large ); tableau.setUpperBound( 21, large ); } - + void populateNetworkBackwardSigmoid( NLR::NetworkLevelReasoner &nlr, 
MockTableau &tableau ) { /* @@ -2315,7 +2314,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); } - + void populateNetworkBackwardSigmoid2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -2326,31 +2325,31 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x5 x9 x18 x20 x2 x13 x16 x6 x10 - + x3 = -x0 + x1 x4 = x0 + 2*x1 x5 = x0 + x1 + x2 x6 = 3*x0 - 2*x1 - x2 - + x7 = Sigmoid( x3 ) x8 = Sigmoid( x4 ) x9 = Sigmoid( x5 ) x10 = Sigmoid( x6 ) - + x11 = 2x7 + x9 - x10 + 2 x12 = -x7 + 2x8 + x10 x13 = x7 - x8 + 2x9 - 2 - + x14 = Sigmoid( x11 ) x15 = Sigmoid( x12 ) x16 = Sigmoid( x13 ) - + x17 = -x14 + x15 - x16 x18 = x14 + x15 + x16 - + x19 = Sigmoid( x17 ) x20 = Sigmoid( x18 ) - + x21 = x19 - x20 - 1 */ @@ -2396,7 +2395,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setWeight( 4, 1, 5, 1, 1 ); nlr.setWeight( 4, 2, 5, 0, -1 ); nlr.setWeight( 4, 2, 5, 1, 1 ); - + nlr.setWeight( 6, 0, 7, 0, 1 ); nlr.setWeight( 6, 1, 7, 0, -1 ); @@ -2413,7 +2412,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); nlr.addActivationSource( 3, 2, 4, 2 ); - + nlr.addActivationSource( 5, 0, 6, 0 ); nlr.addActivationSource( 5, 1, 6, 1 ); @@ -2442,10 +2441,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); // Very loose bounds for neurons except inputs @@ -2491,7 +2490,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 21, -large ); tableau.setUpperBound( 21, large ); } - + void populateNetworkBackwardSign( 
NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -2593,7 +2592,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); } - + void populateNetworkBackwardSign2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -2604,31 +2603,31 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x5 x9 x18 x20 x2 x13 x16 x6 x10 - + x3 = -x0 + x1 x4 = x0 + 2*x1 x5 = x0 + x1 + x2 x6 = 3*x0 - 2*x1 - x2 - + x7 = Sign( x3 ) x8 = Sign( x4 ) x9 = SIgn( x5 ) x10 = Sign( x6 ) - + x11 = 2x7 + x9 - x10 + 2 x12 = -x7 + 2x8 + x10 x13 = x7 - x8 + 2x9 - 2 - + x14 = Sign( x11 ) x15 = Sign( x12 ) x16 = Sign( x13 ) - + x17 = -x14 + x15 - x16 x18 = x14 + x15 + x16 - + x19 = Sign( x17 ) x20 = Sign( x18 ) - + x21 = x19 - x20 - 1 */ @@ -2674,7 +2673,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setWeight( 4, 1, 5, 1, 1 ); nlr.setWeight( 4, 2, 5, 0, -1 ); nlr.setWeight( 4, 2, 5, 1, 1 ); - + nlr.setWeight( 6, 0, 7, 0, 1 ); nlr.setWeight( 6, 1, 7, 0, -1 ); @@ -2691,7 +2690,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); nlr.addActivationSource( 3, 2, 4, 2 ); - + nlr.addActivationSource( 5, 0, 6, 0 ); nlr.addActivationSource( 5, 1, 6, 1 ); @@ -2720,10 +2719,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); // Very loose bounds for neurons except inputs @@ -2769,7 +2768,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 21, -large ); tableau.setUpperBound( 21, large ); } - + void populateNetworkBackwardRound( 
NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -2871,7 +2870,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); } - + void populateNetworkBackwardRound2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -2882,31 +2881,31 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x5 x9 x18 x20 x2 x13 x16 x6 x10 - + x3 = -x0 + x1 x4 = x0 + 2*x1 x5 = x0 + x1 + x2 x6 = 3*x0 - 2*x1 - x2 - + x7 = Round( x3 ) x8 = Round( x4 ) x9 = Round( x5 ) x10 = Round( x6 ) - + x11 = 2x7 + x9 - x10 + 2 x12 = -x7 + 2x8 + x10 x13 = x7 - x8 + 2x9 - 2 - + x14 = Round( x11 ) x15 = Round( x12 ) x16 = Round( x13 ) - + x17 = -x14 + x15 - x16 x18 = x14 + x15 + x16 - + x19 = Round( x17 ) x20 = Round( x18 ) - + x21 = x19 - x20 - 1 */ @@ -2952,7 +2951,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setWeight( 4, 1, 5, 1, 1 ); nlr.setWeight( 4, 2, 5, 0, -1 ); nlr.setWeight( 4, 2, 5, 1, 1 ); - + nlr.setWeight( 6, 0, 7, 0, 1 ); nlr.setWeight( 6, 1, 7, 0, -1 ); @@ -2969,7 +2968,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); nlr.addActivationSource( 3, 2, 4, 2 ); - + nlr.addActivationSource( 5, 0, 6, 0 ); nlr.addActivationSource( 5, 1, 6, 1 ); @@ -2998,10 +2997,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); // Very loose bounds for neurons except inputs @@ -3047,7 +3046,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 21, -large ); tableau.setUpperBound( 21, large ); } - + void populateNetworkBackwardAbs( 
NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -3149,7 +3148,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); } - + void populateNetworkBackwardAbs2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -3160,31 +3159,31 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x5 x9 x18 x20 x2 x13 x16 x6 x10 - + x3 = -x0 + x1 x4 = x0 + 2*x1 x5 = x0 + x1 + x2 x6 = 3*x0 - 2*x1 - x2 - + x7 = Abs( x3 ) x8 = Abs( x4 ) x9 = Abs( x5 ) x10 = Abs( x6 ) - + x11 = 2x7 + x9 - x10 + 2 x12 = -x7 + 2x8 + x10 x13 = x7 - x8 + 2x9 - 2 - + x14 = Abs( x11 ) x15 = Abs( x12 ) x16 = Abs( x13 ) - + x17 = -x14 + x15 - x16 x18 = x14 + x15 + x16 - + x19 = Abs( x17 ) x20 = Abs( x18 ) - + x21 = x19 - x20 - 1 */ @@ -3230,7 +3229,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setWeight( 4, 1, 5, 1, 1 ); nlr.setWeight( 4, 2, 5, 0, -1 ); nlr.setWeight( 4, 2, 5, 1, 1 ); - + nlr.setWeight( 6, 0, 7, 0, 1 ); nlr.setWeight( 6, 1, 7, 0, -1 ); @@ -3247,7 +3246,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); nlr.addActivationSource( 3, 2, 4, 2 ); - + nlr.addActivationSource( 5, 0, 6, 0 ); nlr.addActivationSource( 5, 1, 6, 1 ); @@ -3276,10 +3275,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); // Very loose bounds for neurons except inputs @@ -3325,7 +3324,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 21, -large ); tableau.setUpperBound( 21, large ); } - + void populateNetworkBackwardLeakyReLU( 
NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -3352,7 +3351,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - + nlr.getLayer( 2 )->setAlpha( 0.1 ); nlr.getLayer( 4 )->setAlpha( 0.1 ); @@ -3429,7 +3428,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); } - + void populateNetworkBackwardLeakyRelu2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* @@ -3440,31 +3439,31 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x5 x9 x18 x20 x2 x13 x16 x6 x10 - + x3 = -x0 + x1 x4 = x0 + 2*x1 x5 = x0 + x1 + x2 x6 = 3*x0 - 2*x1 - x2 - + x7 = LeakyReLU( x3 ) x8 = LeakyReLU( x4 ) x9 = LeakyReLU( x5 ) x10 = LeakyReLU( x6 ) - + x11 = 2x7 + x9 - x10 + 2 x12 = -x7 + 2x8 + x10 x13 = x7 - x8 + 2x9 - 2 - + x14 = LeakyReLU( x11 ) x15 = LeakyReLU( x12 ) x16 = LeakyReLU( x13 ) - + x17 = -x14 + x15 - x16 x18 = x14 + x15 + x16 - + x19 = LeakyReLU( x17 ) x20 = LeakyReLU( x18 ) - + x21 = x19 - x20 - 1 */ @@ -3477,7 +3476,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); nlr.addLayer( 6, NLR::Layer::LEAKY_RELU, 2 ); nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - + nlr.getLayer( 2 )->setAlpha( 0.1 ); nlr.getLayer( 4 )->setAlpha( 0.1 ); nlr.getLayer( 6 )->setAlpha( 0.1 ); @@ -3514,7 +3513,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setWeight( 4, 1, 5, 1, 1 ); nlr.setWeight( 4, 2, 5, 0, -1 ); nlr.setWeight( 4, 2, 5, 1, 1 ); - + nlr.setWeight( 6, 0, 7, 0, 1 ); nlr.setWeight( 6, 1, 7, 0, -1 ); @@ -3531,7 +3530,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); nlr.addActivationSource( 3, 2, 4, 2 ); - + 
nlr.addActivationSource( 5, 0, 6, 0 ); nlr.addActivationSource( 5, 1, 6, 1 ); @@ -3560,10 +3559,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); // Very loose bounds for neurons except inputs @@ -3609,8 +3608,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 21, -large ); tableau.setUpperBound( 21, large ); } - - void populateNetworkBackwardSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + + void populateNetworkBackwardSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) { /* a a' @@ -3708,32 +3708,33 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); } - - - void populateNetworkBackwardSoftmaxAndMax2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + + + void populateNetworkBackwardSoftmaxAndMax2( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) { /* x3 x7 x0 x11 x14 x4 x8 x1 x12 x15 x17 - x5 x9 + x5 x9 x2 x13 x16 x6 x10 - + x3 = -x0 + x1 x4 = x0 + 2*x1 x5 = x0 + x1 + x2 x6 = 3*x0 - 2*x1 - x2 - + x7, x8, x9, x10 = Softmax( x2, x3, x4, x5 ) - + x11 = 2x7 + x9 - x10 + 2 x12 = -x7 + 2x8 + x10 x13 = x7 - x8 + 2x9 - 2 - + x14, x15, x16 = Softmax( x11, x12, x13 ) - + x17 = Max( x14, x15, x16 ) */ @@ -3801,7 +3802,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 3, 0, 4, 2 ); nlr.addActivationSource( 3, 1, 4, 2 ); nlr.addActivationSource( 3, 2, 4, 2 ); - + nlr.addActivationSource( 4, 0, 5, 0 ); nlr.addActivationSource( 4, 1, 5, 0 ); nlr.addActivationSource( 4, 2, 5, 0 ); @@ -3866,8 +3867,9 @@ class NetworkLevelReasonerTestSuite : public 
CxxTest::TestSuite tableau.setLowerBound( 17, -large ); tableau.setUpperBound( 17, large ); } - - void populateNetworkBackwardReluAndBilinear( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + + void populateNetworkBackwardReluAndBilinear( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) { /* a a' @@ -3959,8 +3961,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); } - - void populateNetworkBackwardReluAndBilinear2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + + void populateNetworkBackwardReluAndBilinear2( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) { /* x3 x7 @@ -3970,23 +3973,23 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x5 x9 x12 x14 x2 x6 x10 - + x3 = -x0 + x1 x4 = x0 + 2*x1 x5 = x0 + x1 + x2 x6 = 3*x0 - 2*x1 - x2 - + x7 = ReLU( x3 ) x8 = ReLU( x4 ) x9 = ReLU( x5 ) x10 = ReLU( x6 ) - + x11 = 2x7 + x9 - x10 + 2 x12 = -x7 + 2x8 + x10 - + x13 = ReLU( x11 ) x14 = ReLU( x12 ) - + x15 = x13 * x14 */ @@ -4031,7 +4034,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addActivationSource( 3, 0, 4, 0 ); nlr.addActivationSource( 3, 1, 4, 1 ); - + nlr.addActivationSource( 4, 0, 5, 0 ); nlr.addActivationSource( 4, 1, 5, 0 ); @@ -4163,7 +4166,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output[0], 0.5045, 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( output[1], 2.1957, 0.0001 ) ); } - + void test_evaluate_abs() { NLR::NetworkLevelReasoner nlr; @@ -4200,7 +4203,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); TS_ASSERT( FloatUtils::areEqual( output[1], 10 ) ); } - + void test_evaluate_sign() { NLR::NetworkLevelReasoner nlr; @@ -4237,7 +4240,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output[0], -1 ) ); TS_ASSERT( 
FloatUtils::areEqual( output[1], -4 ) ); } - + void test_evaluate_round() { NLR::NetworkLevelReasoner nlr; @@ -4274,8 +4277,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output[0], 0, 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( output[1], -12, 0.0001 ) ); } - - void test_evaluate_leaky_relu() + + void test_evaluate_leaky_relu() { NLR::NetworkLevelReasoner nlr; @@ -4311,7 +4314,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output[0], -0.04, 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( output[1], -0.76, 0.0001 ) ); } - + void test_evaluate_max() { NLR::NetworkLevelReasoner nlr; @@ -4345,8 +4348,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output[0], -5 ) ); } - - + + void test_evaluate_softmax() { NLR::NetworkLevelReasoner nlr; @@ -4375,12 +4378,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // With Softmax, case 2 input[0] = -3; input[1] = 3; - + TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); TS_ASSERT( FloatUtils::areEqual( output[0], 0.1206, 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( output[1], 2.7588, 0.0001 ) ); } - + void test_evaluate_bilinear() { NLR::NetworkLevelReasoner nlr; @@ -4395,7 +4398,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite input[1] = 0; TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - + TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) ); // With Bilinear, case 1 @@ -4412,7 +4415,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); TS_ASSERT( FloatUtils::areEqual( output[0], 0.0912, 0.0001 ) ); } - + void test_evaluate_non_consecutive_layers() { NLR::NetworkLevelReasoner nlr; @@ -4487,7 +4490,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite NLR::NetworkLevelReasoner nlr; populateNetworkWithAbsAndRelu( nlr ); - + 
double input[2]; double output[2]; @@ -4507,13 +4510,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output[0], 4 ) ); TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) ); } - + void test_evaluate_round_and_sign() { NLR::NetworkLevelReasoner nlr; - + populateNetworkWithRoundAndSign( nlr ); - + double input[2]; double output[2]; @@ -4530,16 +4533,16 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) ); - TS_ASSERT( FloatUtils::areEqual( output[0], -1, 0.0001) ); + TS_ASSERT( FloatUtils::areEqual( output[0], -1, 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( output[1], -4, 0.0001 ) ); } - + void test_evaluate_leaky_relu_and_sigmoid() { NLR::NetworkLevelReasoner nlr; - + populateNetworkWithLeakyReluAndSigmoid( nlr ); - + double input[2]; double output[2]; @@ -4559,13 +4562,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output[0], 0.4013, 0.0001 ) ); TS_ASSERT( FloatUtils::areEqual( output[1], 0.6508, 0.0001 ) ); } - + void test_evaluate_softmax_and_max() { NLR::NetworkLevelReasoner nlr; - + populateNetworkWithSoftmaxAndMax( nlr ); - + double input[2]; double output[1]; @@ -4583,13 +4586,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output[0], -1.0000, 0.0001 ) ); } - + void test_evaluate_relu_and_bilinear() { NLR::NetworkLevelReasoner nlr; - + populateNetworkWithReluAndBilinear( nlr ); - + double input[2]; double output[1]; @@ -4689,7 +4692,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - + void test_store_into_other_with_round() { NLR::NetworkLevelReasoner nlr; @@ -4724,7 +4727,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output1[0], 
output2[0] ) ); TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - + void test_store_into_other_with_sign() { NLR::NetworkLevelReasoner nlr; @@ -4759,7 +4762,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - + void test_store_into_other_with_abs() { NLR::NetworkLevelReasoner nlr; @@ -4794,7 +4797,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - + void test_store_into_other_with_leaky_relu() { NLR::NetworkLevelReasoner nlr; @@ -4829,7 +4832,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - + void test_store_into_other_with_max() { NLR::NetworkLevelReasoner nlr; @@ -4864,7 +4867,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - + void test_store_into_other_with_softmax() { NLR::NetworkLevelReasoner nlr; @@ -4899,7 +4902,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - + void test_store_into_other_with_bilinear() { NLR::NetworkLevelReasoner nlr; @@ -4934,7 +4937,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } - + void test_generate_input_query() { NLR::NetworkLevelReasoner nlr; @@ -5187,7 +5190,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 0.0001 ) ); } } - + void 
test_simulate_round() { NLR::NetworkLevelReasoner nlr; @@ -5263,7 +5266,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 0.0001 ) ); } } - + void test_simulate_sign() { NLR::NetworkLevelReasoner nlr; @@ -5335,7 +5338,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -4 ) ); } } - + void test_simulate_abs() { NLR::NetworkLevelReasoner nlr; @@ -5407,7 +5410,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 10 ) ); } } - + void test_simulate_leaky_relu() { NLR::NetworkLevelReasoner nlr; @@ -5483,7 +5486,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 0.0001 ) ); } } - + void test_simulate_max() { NLR::NetworkLevelReasoner nlr; @@ -5540,7 +5543,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -5 ) ); } } - + void test_simulate_softmax() { NLR::NetworkLevelReasoner nlr; @@ -5618,7 +5621,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 0.0001 ) ); } } - + void test_simulate_bilinear() { NLR::NetworkLevelReasoner nlr; @@ -5756,9 +5759,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite void test_simulate_abs_and_relu() { NLR::NetworkLevelReasoner nlr; - + populateNetworkWithAbsAndRelu( nlr ); - + unsigned simulationSize = Options::get()->getInt( Options::NUMBER_OF_SIMULATIONS ); // Simulate1 @@ -5803,7 +5806,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 4 ) ); } } - + void test_simulate_round_and_sign() { NLR::NetworkLevelReasoner nlr; @@ -5854,7 +5857,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -4 ) ); } } - + void test_simulate_leaky_relu_and_sigmoid() { NLR::NetworkLevelReasoner nlr; @@ -5909,7 +5912,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 0.0001 ) ); } } - + void test_simulate_softmax_and_max() { NLR::NetworkLevelReasoner nlr; @@ -5952,7 +5955,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 0.0001 ) ); } } - + void 
test_simulate_relu_and_bilinear() { NLR::NetworkLevelReasoner nlr; @@ -5993,7 +5996,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 0 ) ); } } - + void test_interval_arithmetic_bound_propagation_relu_constraints() { NLR::NetworkLevelReasoner nlr; @@ -6326,7 +6329,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - + void test_interval_arithmetic_bound_propagation_sign_constraints() { NLR::NetworkLevelReasoner nlr; @@ -6459,12 +6462,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - + void test_interval_arithmetic_bound_propagation_leaky_relu_constraints() { NLR::NetworkLevelReasoner nlr; populateNetworkWithLeakyRelu( nlr ); - + MockTableau tableau; tableau.getBoundManager().initialize( 14 ); @@ -6512,14 +6515,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), - + Tightening( 9, -0.28, Tightening::LB ), Tightening( 9, 10.1, Tightening::UB ), Tightening( 11, -0.38, Tightening::LB ), Tightening( 11, 9.1, Tightening::UB ), @@ -6579,7 +6582,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 
7.1, Tightening::UB ), Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), - + Tightening( 9, -0.34, Tightening::LB ), Tightening( 9, 7.1, Tightening::UB ), Tightening( 11, -0.32, Tightening::LB ), Tightening( 11, 7.3, Tightening::UB ), @@ -6590,13 +6593,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - + void test_interval_arithmetic_bound_propagation_round_constraints() { NLR::NetworkLevelReasoner nlr; populateNetworkWithRound( nlr ); - - + + MockTableau tableau; tableau.getBoundManager().initialize( 14 ); @@ -6644,14 +6647,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - + Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), - + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 11, Tightening::UB ), Tightening( 11, -7, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), @@ -6704,14 +6707,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), 
Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - + Tightening( 9, -16, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), Tightening( 11, -15, Tightening::LB ), Tightening( 11, 2, Tightening::UB ), @@ -6722,13 +6725,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - + void test_interval_arithmetic_bound_propagation_sigmoid_constraints() { NLR::NetworkLevelReasoner nlr; populateNetworkWithSigmoids( nlr ); - - + + MockTableau tableau; tableau.getBoundManager().initialize( 14 ); @@ -6776,14 +6779,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - + Tightening( 3, 0.5000, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), Tightening( 5, 0.0067, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.7311, Tightening::UB ), Tightening( 8, -0.2244, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), Tightening( 10, 0.3948, Tightening::LB ), Tightening( 10, 2.2244, Tightening::UB ), - + Tightening( 9, 0.4441, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), Tightening( 11, 0.5974, Tightening::LB ), Tightening( 11, 0.9024, Tightening::UB ), @@ -6836,14 +6839,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, 
Tightening::UB ), - + Tightening( 3, 0.1192, Tightening::LB ), Tightening( 3, 0.8808, Tightening::UB ), Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9933, Tightening::UB ), Tightening( 7, 0.2689, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), Tightening( 8, -0.7616, Tightening::LB ), Tightening( 8, 1.6052, Tightening::UB ), Tightening( 10, 0.2384, Tightening::LB ), Tightening( 10, 2.6052, Tightening::UB ), - + Tightening( 9, 0.3183, Tightening::LB ), Tightening( 9, 0.8327, Tightening::UB ), Tightening( 11, 0.5593, Tightening::LB ), Tightening( 11, 0.9312, Tightening::UB ), @@ -6854,13 +6857,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - + void test_interval_arithmetic_bound_propagation_max_constraints() { NLR::NetworkLevelReasoner nlr; populateNetworkWithMax( nlr ); - - + + MockTableau tableau; tableau.getBoundManager().initialize( 12 ); @@ -6905,21 +6908,21 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 3, -5, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), Tightening( 8, -1, Tightening::LB ), Tightening( 8, 10, Tightening::UB ), Tightening( 9, -8, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 10, Tightening::UB ), - + Tightening( 11, -10, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); // Change the current bounds @@ -6960,28 +6963,28 @@ class 
NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 3, -8, Tightening::LB ), Tightening( 3, 9, Tightening::UB ), Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), Tightening( 5, -6, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 9, Tightening::UB ), Tightening( 7, -5, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), Tightening( 8, -5, Tightening::LB ), Tightening( 8, 16, Tightening::UB ), Tightening( 9, -14, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), - + Tightening( 10, -5, Tightening::LB ), Tightening( 10, 16, Tightening::UB ), - + Tightening( 11, -16, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - + void test_interval_arithmetic_bound_propagation_softmax_constraints() { NLR::NetworkLevelReasoner nlr; populateNetworkWithSoftmax( nlr ); - - + + MockTableau tableau; tableau.getBoundManager().initialize( 14 ); @@ -7029,14 +7032,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - + Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), - + Tightening( 9, 0.0240, Tightening::LB ), Tightening( 9, 0.8349, Tightening::UB ), Tightening( 11, 0.1651, Tightening::LB ), Tightening( 11, 0.9759, Tightening::UB ), @@ -7089,14 +7092,14 @@ 
class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ), - + Tightening( 9, 0.0184, Tightening::LB ), Tightening( 9, 0.8678, Tightening::UB ), Tightening( 11, 0.1322, Tightening::LB ), Tightening( 11, 0.9816, Tightening::UB ), @@ -7107,13 +7110,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - + void test_interval_arithmetic_bound_propagation_bilinear_constraints() { NLR::NetworkLevelReasoner nlr; populateNetworkWithBilinear( nlr ); - - + + MockTableau tableau; tableau.getBoundManager().initialize( 12 ); @@ -7158,21 +7161,21 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 3, -0.5, Tightening::LB ), Tightening( 3, 0.5, Tightening::UB ), Tightening( 4, -0.3, Tightening::LB ), Tightening( 4, 0.3, Tightening::UB ), Tightening( 5, -0.3, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), - + Tightening( 6, -0.55, Tightening::LB ), Tightening( 6, 0.55, Tightening::UB ), Tightening( 7, -0.09, Tightening::LB ), Tightening( 7, 0.09, Tightening::UB ), Tightening( 8, 1.36, Tightening::LB ), Tightening( 8, 2.64, Tightening::UB ), Tightening( 9, -0.64, Tightening::LB ), Tightening( 9, 0.64, Tightening::UB ), - + Tightening( 10, -1.6896, Tightening::LB ), Tightening( 10, 
1.6896, Tightening::UB ), - + Tightening( 11, -1.6896, Tightening::LB ), Tightening( 11, 1.6896, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); // Change the current bounds @@ -7213,22 +7216,22 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 3, -0.8, Tightening::LB ), Tightening( 3, 0.9, Tightening::UB ), Tightening( 4, -0.5, Tightening::LB ), Tightening( 4, 0.5, Tightening::UB ), Tightening( 5, -0.6, Tightening::LB ), Tightening( 5, 0.3, Tightening::UB ), - + Tightening( 6, -0.88, Tightening::LB ), Tightening( 6, 0.99, Tightening::UB ), Tightening( 7, -0.3, Tightening::LB ), Tightening( 7, 0.3, Tightening::UB ), Tightening( 8, 0.82, Tightening::LB ), Tightening( 8, 3.29, Tightening::UB ), Tightening( 9, -1.29, Tightening::LB ), Tightening( 9, 1.18, Tightening::UB ), - + Tightening( 10, -4.2441, Tightening::LB ), Tightening( 10, 3.8822, Tightening::UB ), - + Tightening( 11, -3.8822, Tightening::LB ), Tightening( 11, 4.2441, Tightening::UB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - + void test_interval_arithmetic_bound_propagation_abs_and_relu_constraints() { NLR::NetworkLevelReasoner nlr; @@ -7357,16 +7360,16 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 12, 0, Tightening::LB ), Tightening( 12, 14, Tightening::UB ), Tightening( 13, 0, Tightening::LB ), Tightening( 13, 56, Tightening::UB ), } ); - + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - + void test_interval_arithmetic_bound_propagation_round_and_sign_constraints() { NLR::NetworkLevelReasoner nlr; populateNetworkWithRoundAndSign( nlr ); - + MockTableau tableau; tableau.getBoundManager().initialize( 14 ); @@ -7414,14 +7417,14 @@ class NetworkLevelReasonerTestSuite : 
public CxxTest::TestSuite Tightening( 2, 2.4, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), Tightening( 4, -3.5, Tightening::LB ), Tightening( 4, 7.4, Tightening::UB ), Tightening( 6, -1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - + Tightening( 3, 2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), Tightening( 5, -4, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), Tightening( 8, -4, Tightening::LB ), Tightening( 8, 11, Tightening::UB ), Tightening( 10, -7, Tightening::LB ), Tightening( 10, 8, Tightening::UB ), - + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), @@ -7431,7 +7434,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); // Change the current bounds @@ -7475,14 +7478,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 2, -2.1, Tightening::LB ), Tightening( 2, 2.6, Tightening::UB ), Tightening( 4, -12.5, Tightening::LB ), Tightening( 4, -1, Tightening::UB ), Tightening( 6, 1.4, Tightening::LB ), Tightening( 6, 2.1, Tightening::UB ), - + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), Tightening( 5, -12, Tightening::LB ), Tightening( 5, -1, Tightening::UB ), Tightening( 7, 1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), Tightening( 8, -16, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), Tightening( 10, -15, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), Tightening( 11, -1, Tightening::LB ), Tightening( 11, 1, Tightening::UB ), @@ -7493,12 +7496,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( 
nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - + void test_interval_arithmetic_bound_propagation_leaky_relu_and_sigmoid_constraints() { NLR::NetworkLevelReasoner nlr; populateNetworkWithLeakyReluAndSigmoid( nlr ); - + MockTableau tableau; tableau.getBoundManager().initialize( 14 ); @@ -7546,14 +7549,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 2, 0, Tightening::LB ), Tightening( 2, 3, Tightening::UB ), Tightening( 4, -8, Tightening::LB ), Tightening( 4, 7, Tightening::UB ), Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), Tightening( 5, -0.8, Tightening::LB ), Tightening( 5, 7, Tightening::UB ), Tightening( 7, -0.1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), Tightening( 8, -2.8, Tightening::LB ), Tightening( 8, 10.1, Tightening::UB ), Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 9.1, Tightening::UB ), - + Tightening( 9, 0.0573, Tightening::LB ), Tightening( 9, 0.9999, Tightening::UB ), Tightening( 11, 0.0219, Tightening::LB ), Tightening( 11, 0.9999, Tightening::UB ), @@ -7563,7 +7566,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); // Change the current bounds @@ -7614,7 +7617,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 8, -3.4, Tightening::LB ), Tightening( 8, 7.1, Tightening::UB ), Tightening( 10, -3.2, Tightening::LB ), Tightening( 10, 7.3, Tightening::UB ), - + Tightening( 9, 0.0323, Tightening::LB ), Tightening( 9, 0.9992, Tightening::UB ), Tightening( 11, 0.0392, Tightening::LB ), Tightening( 11, 0.9993, Tightening::UB ), @@ -7625,13 +7628,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( 
nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - + void test_interval_arithmetic_bound_propagation_softmax_and_max_constraints() { NLR::NetworkLevelReasoner nlr; populateNetworkWithSoftmaxAndMax( nlr ); - - + + MockTableau tableau; tableau.getBoundManager().initialize( 12 ); @@ -7671,26 +7674,34 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Perform the tightening pass TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - List expectedBounds( { - Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 0.0066, Tightening::LB ), Tightening( 3, 0.9517, Tightening::UB ), - Tightening( 5, 0.0007, Tightening::LB ), Tightening( 5, 0.9909, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.7297, Tightening::UB ), - - Tightening( 8, -0.7225, Tightening::LB ), Tightening( 8, 1.9403, Tightening::UB ), - Tightening( 9, 0.3192, Tightening::LB ), Tightening( 9, 2.9819, Tightening::UB ), - - Tightening( 10, 0.3192, Tightening::LB ), Tightening( 10, 2.9819, Tightening::UB ), - - Tightening( 11, -2.9819, Tightening::LB ), Tightening( 11, -0.3192, Tightening::UB ) - } ); + List expectedBounds( { Tightening( 2, 0, Tightening::LB ), + Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), + Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), + Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.0066, Tightening::LB ), + Tightening( 3, 0.9517, Tightening::UB ), + Tightening( 5, 0.0007, Tightening::LB ), + Tightening( 5, 0.9909, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), + Tightening( 7, 0.7297, Tightening::UB ), + + Tightening( 8, -0.7225, Tightening::LB ), + Tightening( 8, 1.9403, Tightening::UB ), + Tightening( 9, 0.3192, 
Tightening::LB ), + Tightening( 9, 2.9819, Tightening::UB ), + + Tightening( 10, 0.3192, Tightening::LB ), + Tightening( 10, 2.9819, Tightening::UB ), + + Tightening( 11, -2.9819, Tightening::LB ), + Tightening( 11, -0.3192, Tightening::UB ) } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); // Change the current bounds @@ -7726,28 +7737,36 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Perform the tightening pass TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); - List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + List expectedBounds2( { Tightening( 2, -2, Tightening::LB ), + Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), + Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), + Tightening( 6, 2, Tightening::UB ), - Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), - Tightening( 9, 0.0654, Tightening::LB ), Tightening( 9, 2.9933, Tightening::UB ), - - Tightening( 10, 0.0654, Tightening::LB ), Tightening( 10, 2.9933, Tightening::UB ), - - Tightening( 11, -2.9933, Tightening::LB ), Tightening( 11, -0.0654, Tightening::UB ) - } ); + Tightening( 3, 0.0009, Tightening::LB ), + Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), + Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, 
Tightening::LB ), + Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 9, 0.0654, Tightening::LB ), + Tightening( 9, 2.9933, Tightening::UB ), + + Tightening( 10, 0.0654, Tightening::LB ), + Tightening( 10, 2.9933, Tightening::UB ), + + Tightening( 11, -2.9933, Tightening::LB ), + Tightening( 11, -0.0654, Tightening::UB ) } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - + void test_interval_arithmetic_bound_propagation_relu_and_bilinear_constraints() { NLR::NetworkLevelReasoner nlr; @@ -7796,7 +7815,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 2, 0, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), Tightening( 6, -1, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), @@ -7805,7 +7824,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 9, -1, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), Tightening( 10, -7, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), - + Tightening( 11, -49, Tightening::LB ), Tightening( 11, 7, Tightening::UB ), } ); @@ -7848,27 +7867,27 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, 0, 
Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 7, Tightening::UB ), Tightening( 9, -2, Tightening::LB ), Tightening( 9, 7, Tightening::UB ), Tightening( 10, -14, Tightening::LB ), Tightening( 10, 49, Tightening::UB ), - - Tightening( 11, -49, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), + + Tightening( 11, -49, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); } - + void test_sbt_relus_all_active() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); @@ -8155,7 +8174,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite void test_sbt_relu_residual1() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -8180,20 +8199,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ReLU is undecided, bound is concretized. Coefficient: 1/( 1--1 ) = 1/2 = 0.5 - + x2.lb = 0.5x0 - x2.ub = 0.5x0 + 0.5 + x2.ub = 0.5x0 + 0.5 x2 range: [0, 1] - + Layer 2 (with residual from x0): x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1] x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] x3 range: [-1, 2.5] - + ReLU is undecided, bound is concretized. Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. 
- + x4.lb = 0 x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] x4 range: [0, 2.5] @@ -8216,18 +8235,18 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2.5, Tightening::UB ), Tightening( 5, 2, Tightening::LB ), - Tightening( 5, 5.5, Tightening::UB ), + Tightening( 5, 5.5, Tightening::UB ), } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - + void test_sbt_relu_residual2() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -8252,20 +8271,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite ReLU is undecided, bound is concretized. Coefficient: 1/( 1--1 ) = 1/2 = 0.5 - + x2.lb = 0.5x0 - x2.ub = 0.5x0 + 0.5 + x2.ub = 0.5x0 + 0.5 x2 range: [0, 1] - + Layer 2 (with residual from x0): x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1] x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] x3 range: [-1, 2.5] - + ReLU is undecided, bound is concretized. Coefficient: 2.5/( 2.5--1 ) = 2.5/3.5 = 5/7. 
- + x4.lb = 0 x4.ub = 5/7 ( -1.5x0 + 1 ) + 5/7 = -15/14 x0 + 20/14 : [5/14, 35/14 = 2.5] x4 range: [0, 2.5] @@ -8273,9 +8292,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Layer 3 (with residual from x0): x5.lb = 3 ( 0 ) + 1 ( x0 ) + 1 = 1x0 + 1 : [0, 2] - x5.ub = 3 ( -15/14 x0 + 20/14 ) + 1 ( x0 ) + 1 = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5] - x5 range: [0, 7.5] - + x5.ub = 3 ( -15/14 x0 + 20/14 ) + 1 ( x0 ) + 1 = -31/14 x0 + 74/14 : [43/14, 105/14 + = 7.5] x5 range: [0, 7.5] + Layer 4: x6.lb = 1x0 + 1 : [0, 2] x6.ub = -31/14 x0 + 74/14 : [43/14, 105/14 = 7.5] @@ -8301,11 +8320,11 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - + void test_sbt_relu_reindex() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -8330,41 +8349,41 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x2.lb = x0 + x1 : [-2, 2] x2.ub = x0 + x1 : [-2, 2] - + x3.lb = x0 - x1 : [-2, 2] x3.ub = x0 - x1 : [-2, 2] Both ReLUs are undecided, bounds are concretized. Coefficient: 2/( 2--2 ) = 2/4 = 0.5 - + x4.lb = 0.5 ( x0 + x1 ) = 0.5x0 + 0.5x1 x4.ub = 0.5 ( x0 + x1 ) + 1 = 0.5x0 + 0.5x1 + 1 x4 range: [0, 2] - + x5.lb = 0.5 ( x0 - x1 ) = 0.5x0 - 0.5x1 x5.ub = 0.5 ( x0 - x1 ) + 1 = 0.5x0 - 0.5x1 + 1 x5 range: [0, 2] - + Layer 2: x6.lb = 1 ( 0.5x0 + 0.5x1 ) + 1 ( 0.5x0 - 0.5x1 ) = x0 : [-1, 1] x6.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) + 1 ( 0.5x0 - 0.5x1 + 1 ) = x0 + 2 : [1, 3] x6 range: [-1, 3] - + x7.lb = 1 ( 0.5x0 + 0.5x1 ) - 1 ( 0.5x0 - 0.5x1 + 1 ) = x1 - 1 : [-2, 0] x7.ub = 1 ( 0.5x0 + 0.5x1 + 1 ) - 1 ( 0.5x0 - 0.5x1 ) = x1 + 1 : [0, 2] x7 range: [-2, 2] - + Both ReLUs are undecided, bounds are concretized. 
Coefficient (first ReLU, lower): 1/( 1--1 ) = 1/2 = 0.5 Coefficient (first ReLU, upper): 1 (propagated as is) Coefficient (second ReLU, lower): 0 (bound is zero) Coefficient (second ReLU, upper): 2/( 2--2 ) = 2/4 = 0.5 - + x8.lb = 0.5 ( x0 ) = 0.5x0 x8.ub = x0 + 2 x8 range: [0, 3] - + x9.lb = 0 x9.ub = 0.5 ( x1 + 1 ) + 1 = 0.5x1 + 1.5 x9 range: [0, 2] @@ -8374,11 +8393,11 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x10.lb = 1 ( 0.5x0 ) + 1 ( 0 ) + 1 = 0.5x0 + 1 : [0.5, 1.5] x10.ub = 1 ( x0 + 2 ) + 1 ( 0.5x1 + 1.5 ) + 1 = x0 + 0.5x1 + 4.5 : [3, 6] x10 range: [0.5, 6] - + x11.lb = 0.5x1 - 0.5 x11.ub = 0.5x1 + 1.5 x11 range: [0, 2] - + */ List expectedBounds( @@ -8388,7 +8407,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), @@ -8403,7 +8422,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - + void test_sbt_abs_all_positive() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); @@ -8635,7 +8654,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - + void test_sbt_absolute_values_positive_and_not_fixed() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); @@ -8874,7 +8893,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( 
nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - + void test_sbt_signs_positive_and_not_fixed() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); @@ -9088,7 +9107,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Second sign is positive, bounds become constant 1 x4: all set to -1 - + x5: all set to 1 Layer 2: @@ -9116,7 +9135,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } - + void test_sbt_leaky_relu() { NLR::NetworkLevelReasoner nlr; @@ -9143,43 +9162,43 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x2.lb = x0 + x1 : [-2, 2] x2.ub = x0 + x1 : [-2, 2] - + x3.lb = x0 - x1 : [-2, 2] x3.ub = x0 - x1 : [-2, 2] Both LeakyReLUs are undecided, bounds are concretized. Coefficient: ( 2 - 0.2*-2 )/( 2--2 ) = 2.4/4 = 0.6 Bias: ( 0.2 - 1 ) * 2 * -2 / ( 2--2 ) = 0.8 - + x4.lb = x0 + x1 x4.ub = 0.6 ( x0 + x1 ) + 0.8 = 0.6x0 + 0.6x1 + 0.8 x4 range: [-0.4, 2] - + x5.lb = x0 - x1 x5.ub = 0.6 ( x0 - x1 ) + 0.8 = 0.6x0 - 0.6x1 + 0.8 x5 range: [-0.4, 2] - + Layer 2: x6.lb = 1 ( x0 + x1 ) + 1 ( x0 - x1 ) = 2x0 : [-2, 2] x6.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) + 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 1.2x0 + 1.6 : [0.4, 2.8] x6 range: [-2, 2.8] - + x7.lb = 1 ( x0 + x1 ) - 1 ( 0.6x0 - 0.6x1 + 0.8 ) = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] x7.ub = 1 ( 0.6x0 + 0.6x1 + 0.8 ) - 1 ( x0 - x1 ) = -0.4x0 + 1.6x1 + 0.8 : [-1.2, 2.8] x7 range: [-2.8, 2.8] - + Both LeakyReLUs are undecided, bounds are concretized. 
Coefficient (first LeakyReLU): ( 2.8 - 0.2*-2 )/( 2.8--2 ) = 3.2/4.8 = 10/15 Bias (first LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2 / ( 2.8--2 ) = 14/15 - + Coefficient (second LeakyReLU): ( 2.8 - 0.2*-2.8 )/( 2.8--2.8 ) = 3.36/5.6 = 0.6 Bias (second LeakyReLU): ( 0.2 - 1 ) * 2.8 * -2.8 / ( 2.8--2.8 ) = 1.12 - + x8.lb = 2x0 x8.ub = 10/15 ( 1.2x0 + 1.6 ) + 14/15 = 0.8x0 + 2 x8 range: [-0.4, 2.8] - + x9.lb = 0.4x0 + 1.6x1 - 0.8 x9.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 x9 range: [-0.56, 2.8] @@ -9187,30 +9206,30 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Layer 3: x10.lb = 1 ( 0.4x0 + 1.6x1 - 0.8 ) + 1 ( 2x0 ) + 1 = 2.4x0 + 1.6x1 + 0.2 : [-3.8, 5.2] - x10.ub = 1 ( -0.24 x0 + 0.96 x1 + 1.6 ) + 1 ( 0.8x0 + 2 ) + 1 = 0.56x0 + 0.96x1 + 4.6 : [3.08, 6.12] - x10 range: [-3.8, 6.12] - + x10.ub = 1 ( -0.24 x0 + 0.96 x1 + 1.6 ) + 1 ( 0.8x0 + 2 ) + 1 = 0.56x0 + 0.96x1 + 4.6 : + [3.08, 6.12] x10 range: [-3.8, 6.12] + x11.lb = 0.4x0 + 1.6x1 - 0.8 : [-2.8, 1.2] x11.ub = 0.6 ( -0.4x0 + 1.6x1 + 0.8 ) + 1.12 = -0.24 x0 + 0.96 x1 + 1.6 : [0.4, 2.8] x11 range: [-2.8, 2.8] - + */ List expectedBounds( - { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -0.4, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -0.4, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 4, -0.4, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -0.4, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2.8, Tightening::UB ), - Tightening( 7, -2.8, Tightening::LB ), Tightening( 7, 2.8, Tightening::UB ), + Tightening( 7, -2.8, Tightening::LB ), Tightening( 7, 2.8, Tightening::UB ), - Tightening( 
8, -0.4, Tightening::LB ), Tightening( 8, 2.8, Tightening::UB ), - Tightening( 9, -0.56, Tightening::LB ), Tightening( 9, 2.8, Tightening::UB ), + Tightening( 8, -0.4, Tightening::LB ), Tightening( 8, 2.8, Tightening::UB ), + Tightening( 9, -0.56, Tightening::LB ), Tightening( 9, 2.8, Tightening::UB ), Tightening( 10, -3.8, Tightening::LB ), Tightening( 10, 6.12, Tightening::UB ), - Tightening( 11, -2.8, Tightening::LB ), Tightening( 11, 2.8, Tightening::UB ) + Tightening( 11, -2.8, Tightening::LB ), Tightening( 11, 2.8, Tightening::UB ) } ); @@ -9222,7 +9241,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite void test_sbt_sigmoids_and_round() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -9303,11 +9322,11 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getLb( 1 ), -1 ); TS_ASSERT_EQUALS( nlr.getLayer( 4 )->getUb( 1 ), 1 ); } - + void test_sbt_max_not_fixed() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -9343,14 +9362,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x4.lb = 0.6 ( x0 + x1 ) = 0.6x0 + 0.6x1 x4.ub = 0.6 ( x0 + x1 ) + 1.2 = 0.6x0 + 0.6x1 + 1.2 x4 range: [0, 3] - + x5.lb = 0.4 ( x0 - x1 ) = 0.4x0 + 0.4x1 x5.ub = 0.4 ( x0 - x1 ) + 1.2 = 0.4x0 + 0.4x1 + 1.2 x5 range: [0, 2] - + Max is not fixed because x5.lb <= x4.ub and x4.lb <= x5.ub Max inherits lower bound from x4, and its upper bound is constant 3. 
- + x6.lb = 0.6x0 + 0.6x1 : [-1.2, 1.8] x6.ub = 3 : [3, 3] x6 range: [-1.2, 3] @@ -9386,7 +9405,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite void test_sbt_max_fixed() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -9419,12 +9438,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Second ReLU is positive, bounds survive the activation x4: all set to 0 - + x5.lb = x0 - x1 : [3, 5] x5.ub = x0 - x1 : [3, 5] - + Max is fixed because x5.lb > x4.ub, it inherits x5's bounds - + x6.lb = x0 - x1 : [3, 5] x6.ub = x0 - x1 : [3, 5] @@ -9458,7 +9477,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite void test_sbt_softmax1() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -9479,7 +9498,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite void test_sbt_softmax2() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + { Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); NLR::NetworkLevelReasoner nlr; @@ -9581,7 +9600,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite void test_sbt_softmax3() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -9606,36 +9625,23 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x2: [1, 1.0001] */ - List expectedBounds({ Tightening( 3, 2, Tightening::LB ), - Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 3, Tightening::LB ), - Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), - Tightening( 5, 0, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), - Tightening( 6, -1, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), 
- Tightening( 7, -2, Tightening::UB ), - Tightening( 8, 0.8668, Tightening::LB ), - Tightening( 8, 0.8668, Tightening::UB ), - Tightening( 9, 0.9820, Tightening::LB ), - Tightening( 9, 0.9820, Tightening::UB ), - Tightening( 10, 0.1173, Tightening::LB ), - Tightening( 10, 0.1173, Tightening::UB ), - Tightening( 11, 0.0179, Tightening::LB ), - Tightening( 11, 0.0179, Tightening::UB ), - Tightening( 12, 0.0159, Tightening::LB ), - Tightening( 12, 0.0159, Tightening::UB ), - Tightening( 13, 0.9470, Tightening::LB ), - Tightening( 13, 0.9470, Tightening::UB ), - Tightening( 14, -0.9470, Tightening::LB ), - Tightening( 14, -0.9470, Tightening::UB ), - Tightening( 15, 1.0253, Tightening::LB ), - Tightening( 15, 1.0253, Tightening::UB ), - Tightening( 16, -1.0253, Tightening::LB ), - Tightening( 16, -1.0253, Tightening::UB ) + List expectedBounds( + { Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ), + Tightening( 8, 0.8668, Tightening::LB ), Tightening( 8, 0.8668, Tightening::UB ), + Tightening( 9, 0.9820, Tightening::LB ), Tightening( 9, 0.9820, Tightening::UB ), + Tightening( 10, 0.1173, Tightening::LB ), Tightening( 10, 0.1173, Tightening::UB ), + Tightening( 11, 0.0179, Tightening::LB ), Tightening( 11, 0.0179, Tightening::UB ), + Tightening( 12, 0.0159, Tightening::LB ), Tightening( 12, 0.0159, Tightening::UB ), + Tightening( 13, 0.9470, Tightening::LB ), Tightening( 13, 0.9470, Tightening::UB ), + Tightening( 14, -0.9470, Tightening::LB ), Tightening( 14, -0.9470, Tightening::UB ), + Tightening( 15, 1.0253, Tightening::LB ), Tightening( 15, 1.0253, Tightening::UB ), + Tightening( 16, -1.0253, Tightening::LB ), Tightening( 16, -1.0253, Tightening::UB 
) - } ); + } ); List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); @@ -9688,7 +9694,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite value = NLR::DeepPolySoftmaxElement::dLSEUpperbound( input, outputLb, outputUb, 0, 1 ); TS_ASSERT( FloatUtils::areEqual( value, -0.073207, 0.00001 ) ); } - + void test_sbt_bilinear() { NLR::NetworkLevelReasoner nlr; @@ -9710,7 +9716,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x0: [1, 2] x1: [-2, 1] - + Layer 1: x2.lb = x0 - 2x1 : [-1, 6] @@ -9724,7 +9730,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite alpha_l = x3.lb = -1 beta = x2.lb = -1 gamma_l = -x2.lb * x3.lb = --1 * -1 = -1 - + Upper bound: alpha_u = x3.ub = 3 beta = x2.lb = -1 @@ -9878,12 +9884,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite for ( const auto &bound : expectedBounds ) TS_ASSERT( bounds.exists( bound ) ); } - + void test_backwards_relu() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -9897,42 +9904,42 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, 
Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), + Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), + } ); - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), - Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), - } ); - List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { Tightening( 8, 0, Tightening::LB ), - } ); - + + List expectedBounds2( { + Tightening( 8, 0, Tightening::LB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -9965,47 +9972,48 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly 
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), + + Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + } ); - Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), - } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); 
TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { Tightening( 8, 0, Tightening::LB ), - Tightening( 9, 0, Tightening::LB ), - } ); - + + List expectedBounds4( { + Tightening( 8, 0, Tightening::LB ), + Tightening( 9, 0, Tightening::LB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_relu2() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -10023,56 +10031,56 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - List expectedBounds( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), - - Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), - Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, 
Tightening::UB ), - Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), - - Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), - - Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), - - Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), - } ); - + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), + + Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), + Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), + + Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), + + Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), + + Tightening( 21, -26, Tightening::LB ), Tightening( 
21, 13.9206, Tightening::UB ), + } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { Tightening( 14, 0, Tightening::LB ), - Tightening( 15, 0, Tightening::LB ), - Tightening( 16, 0, Tightening::LB ), - - Tightening( 19, 0, Tightening::LB ), - } ); - + + List expectedBounds2( { + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 19, 0, Tightening::LB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -10125,63 +10133,64 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, 
Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), - - Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), - Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), - Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), - - Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), - Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), - Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), - - Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), - } ); - + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), + Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), + Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), + + Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, 
Tightening::UB ), + Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), + Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), + + Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { Tightening( 7, 0, Tightening::LB ), - - Tightening( 14, 0, Tightening::LB ), - Tightening( 15, 0, Tightening::LB ), - Tightening( 16, 0, Tightening::LB ), - - Tightening( 20, 0, Tightening::LB ), - } ); - + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 20, 0, Tightening::LB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_sigmoid() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -10196,42 +10205,40 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, 
Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ), - Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ), + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ), - Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ), + Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ), + Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ), - Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ), - Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ), + Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ), + Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ), + + Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ), + Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ), + + Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ), + Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ), + } ); - Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ), - Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ), - } ); - List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List 
expectedBounds2( - { - } ); - + + List expectedBounds2( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -10264,46 +10271,45 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ), - Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ), + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ), - Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ), + Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ), + Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ), - Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ), - Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ), + Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ), + Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ), + + Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ), + Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ), + + Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ), + Tightening( 11, 3.2396, 
Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ), + } ); - Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ), - Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ), - } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { - } ); - + + List expectedBounds4( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_sigmoid2() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -10320,53 +10326,51 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), - Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ), - Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ), - Tightening( 10, 0.0025, 
Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ), - - Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ), - Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ), - Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ), - - Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ), - Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ), - Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ), - - Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ), - Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ), - - Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ), - Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ), - - Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ), - } ); - + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), + Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ), + Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ), + Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ), + + Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ), + Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ), + Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ), + + Tightening( 14, 0.7988, 
Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ), + Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ), + Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ), + + Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ), + Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ), + + Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ), + Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ), + + Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ), + } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { - } ); - + + List expectedBounds2( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -10419,57 +10423,56 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ), - Tightening( 8, 0.0067, Tightening::LB 
), Tightening( 8, 0.9933, Tightening::UB ), - Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ), - - Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ), - Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ), - Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ), - - Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ), - Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ), - Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ), - - Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ), - Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ), - - Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ), - Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ), - - Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ), - } ); - + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ), + Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ), + Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ), + + Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ), + Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, 
Tightening::UB ), + Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ), + + Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ), + Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ), + Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ), + + Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ), + Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ), + + Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ), + Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ), + + Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { - } ); - + + List expectedBounds4( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_abs() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -10484,42 +10487,40 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 2, 
-1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + } ); - Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - } ); - List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { - } ); - + + List expectedBounds2( {} ); + TS_ASSERT_THROWS_NOTHING( 
nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -10552,46 +10553,45 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ), + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ), - Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ), + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ), + Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ), + + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ), + } ); - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ), - } ); - 
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { - } ); - + + List expectedBounds4( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_abs2() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -10608,53 +10608,51 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), - Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB 
), - - Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ), - Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), - Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), - - Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), - - Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, 0, 
Tightening::LB ), Tightening( 14, 9, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -10707,57 +10705,56 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB 
), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ), - - Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ), - Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ), - - Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ), - Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ), - Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ), - - Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ), - - Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ), - } ); - + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ), + Tightening( 16, 0, 
Tightening::LB ), Tightening( 16, 15, Tightening::UB ), + + Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ), + + Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { - } ); - + + List expectedBounds4( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_round() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -10772,42 +10769,43 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), 
Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ), + } ); - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ), - } ); - List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { Tightening( 11, -4, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), - } ); - + + List expectedBounds2( { + Tightening( 11, -4, Tightening::LB ), + Tightening( 11, 6, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ 
-10840,48 +10838,51 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), - Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ), + + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ), + } ); - Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ), - } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation 
TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { Tightening( 7, -10, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - - Tightening( 11, -14, Tightening::LB ), Tightening( 11, 11, Tightening::UB ), - } ); - + + List expectedBounds4( { + Tightening( 7, -10, Tightening::LB ), + Tightening( 7, 5, Tightening::UB ), + + Tightening( 11, -14, Tightening::LB ), + Tightening( 11, 11, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_round2() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -10898,53 +10899,51 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, 
Tightening::UB ), - Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ), - - Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ), - Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), - Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ), - - Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ), - Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ), - - Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ), - Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ), - - Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ), - } ); - + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ), + Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ), + Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ), + + Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 
27.5, Tightening::UB ), + Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ), + + Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ), + Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ), + + Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ), + } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { - } ); - + + List expectedBounds2( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -10997,57 +10996,56 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ), - Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, 
Tightening::UB ), - Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), - - Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ), - Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), - Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), - - Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ), - Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ), - - Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), - Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), - - Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ), - } ); - + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ), + Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ), + Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ), + Tightening( 18, -23.5, Tightening::LB ), Tightening( 
18, 26.5, Tightening::UB ), + + Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { - } ); - + + List expectedBounds4( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_sign() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -11062,42 +11060,40 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - 
Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), - } ); - List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { - } ); - + + List expectedBounds2( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -11130,46 +11126,45 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB 
), Tightening( 3, 3, Tightening::UB ), - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), - } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { - } ); - + + List expectedBounds4( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void 
test_backwards_sign2() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -11186,53 +11181,51 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), - Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), - - Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), - Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), - Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), - - Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), - Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), - - Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), - Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, 
Tightening::UB ), - - Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), - } ); - + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { - } ); - + + 
List expectedBounds2( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -11285,57 +11278,56 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), - Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), - - Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), - Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), - Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), - - Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), - Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), - - Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), - Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), - - Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), - } ); - + + List expectedBounds3( { + 
Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { - } ); - + + List expectedBounds4( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) 
); } - + void test_backwards_leaky_relu() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -11350,45 +11342,45 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB 
), Tightening( 11, 4.3306, Tightening::UB ), + } ); - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), - } ); - List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { Tightening( 4, -0.1, Tightening::LB ), - - Tightening( 8, -0.045, Tightening::LB ), - Tightening( 9, -0.3, Tightening::LB ), - } ); - + + List expectedBounds2( { + Tightening( 4, -0.1, Tightening::LB ), + + Tightening( 8, -0.045, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -11421,50 +11413,51 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), - Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, 
Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), - Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); - Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), - Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), - } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { Tightening( 4, -0.5, Tightening::LB ), - Tightening( 5, -0.4, Tightening::LB ), - Tightening( 8, -0.4571, Tightening::LB ), - Tightening( 9, -1.1057, Tightening::LB ), - } ); - + List expectedBounds4( { + Tightening( 4, -0.5, Tightening::LB ), + Tightening( 5, -0.4, Tightening::LB ), + + Tightening( 8, -0.4571, Tightening::LB ), + Tightening( 9, -1.1057, Tightening::LB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_leaky_relu2() { Options::get()->setString( 
Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -11481,63 +11474,63 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), - Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), - Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), - - Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), - Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), - Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), - - Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), - Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), - - Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), - Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, 
Tightening::UB ), - - Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), - } ); - + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), + Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), + Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), + Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), + Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), + + Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), + Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), + + Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), + Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), + + Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), + } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); 
TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { Tightening( 7, -0.2, Tightening::LB ), - Tightening( 8, -0.3, Tightening::LB ), - Tightening( 9, -0.3, Tightening::LB ), - Tightening( 10, -0.6, Tightening::LB ), - - Tightening( 14, -0.9, Tightening::LB ), - Tightening( 15, -0.89, Tightening::LB ), - Tightening( 16, -0.77, Tightening::LB ), - - Tightening( 19, -2.3133, Tightening::LB ), - Tightening( 20, -1.2, Tightening::LB ), - } ); - + + List expectedBounds2( { + Tightening( 7, -0.2, Tightening::LB ), + Tightening( 8, -0.3, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + Tightening( 10, -0.6, Tightening::LB ), + + Tightening( 14, -0.9, Tightening::LB ), + Tightening( 15, -0.89, Tightening::LB ), + Tightening( 16, -0.77, Tightening::LB ), + + Tightening( 19, -2.3133, Tightening::LB ), + Tightening( 20, -1.2, Tightening::LB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -11590,67 +11583,68 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -11, 
Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), - Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), - Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), - - Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), - Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), - Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), - - Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), - Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), - - Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), - Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), - - Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), - } ); - + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), + Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), + Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), + Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, 
Tightening::UB ), + Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), + + Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), + Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), + + Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), + Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), + + Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { Tightening( 7, -0.2, Tightening::LB ), - Tightening( 8, -0.5, Tightening::LB ), - Tightening( 9, -0.6, Tightening::LB ), - Tightening( 10, -1.5, Tightening::LB ), - - Tightening( 14, -1.1, Tightening::LB ), - Tightening( 15, -2.1771, Tightening::LB ), - Tightening( 16, -1.15, Tightening::LB ), - - Tightening( 19, -5.6259, Tightening::LB ), - Tightening( 20, -1.9, Tightening::LB ), - } ); - + + List expectedBounds4( { + Tightening( 7, -0.2, Tightening::LB ), + Tightening( 8, -0.5, Tightening::LB ), + Tightening( 9, -0.6, Tightening::LB ), + Tightening( 10, -1.5, Tightening::LB ), + + Tightening( 14, -1.1, Tightening::LB ), + Tightening( 15, -2.1771, Tightening::LB ), + Tightening( 16, -1.15, Tightening::LB ), + + Tightening( 19, -5.6259, Tightening::LB ), + Tightening( 20, -1.9, Tightening::LB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_softmax_and_max() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - 
Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -11660,47 +11654,45 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, 0 ); tableau.setUpperBound( 1, 1 ); - + // Invoke SBT TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ), - Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ), - Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ), + List expectedBounds( { + Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 8, -0.1519, Tightening::LB ), Tightening( 8, 1.4775, Tightening::UB ), - Tightening( 9, 0.6614, Tightening::LB ), Tightening( 9, 2.3899, Tightening::UB ), + Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ), + Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ), + Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ), + + Tightening( 8, -0.1519, Tightening::LB ), Tightening( 8, 1.4775, Tightening::UB ), + Tightening( 9, 0.6614, Tightening::LB ), Tightening( 9, 2.3899, Tightening::UB ), + + Tightening( 10, 0.6614, Tightening::LB ), Tightening( 10, 2.3899, 
Tightening::UB ), + + Tightening( 11, -2.3899, Tightening::LB ), Tightening( 11, -0.6614, Tightening::UB ), + } ); - Tightening( 10, 0.6614, Tightening::LB ), Tightening( 10, 2.3899, Tightening::UB ), - - Tightening( 11, -2.3899, Tightening::LB ), Tightening( 11, -0.6614, Tightening::UB ), - } ); - List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { - } ); - + + List expectedBounds2( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -11733,46 +11725,45 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), - - Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), - Tightening( 9, 0.0674, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ), - - Tightening( 10, 0.0674, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ), - - Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.0674, 
Tightening::UB ), - } ); - + + List expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 9, 0.0674, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ), + + Tightening( 10, 0.0674, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ), + + Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.0674, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { - } ); - + + List expectedBounds4( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_softmax_and_max2() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -11790,46 +11781,44 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( 
nlr.deepPolyPropagation() ); - List expectedBounds( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ), - Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ), - Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ), - Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ), - - Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9635, Tightening::UB ), - Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9806, Tightening::UB ), - Tightening( 13, -2.9902, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ), - - Tightening( 14, 0.1086, Tightening::LB ), Tightening( 14, 0.9971, Tightening::UB ), - Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8766, Tightening::UB ), - Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4595, Tightening::UB ), - - Tightening( 17, 0.1086, Tightening::LB ), Tightening( 17, 0.9971, Tightening::UB ), - } ); - + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ), + Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ), + Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ), + Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ), + + Tightening( 11, 
1.0013, Tightening::LB ), Tightening( 11, 4.9635, Tightening::UB ), + Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9806, Tightening::UB ), + Tightening( 13, -2.9902, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ), + + Tightening( 14, 0.1086, Tightening::LB ), Tightening( 14, 0.9971, Tightening::UB ), + Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8766, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4595, Tightening::UB ), + + Tightening( 17, 0.1086, Tightening::LB ), Tightening( 17, 0.9971, Tightening::UB ), + } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { - } ); - + + List expectedBounds2( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -11874,51 +11863,50 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), 
Tightening( 9, 0.9990, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ), - - Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9989, Tightening::UB ), - Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ), - Tightening( 13, -2.9989, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ), - - Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9972, Tightening::UB ), - Tightening( 15, 0.0024, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ), - Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ), - - Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9972, Tightening::UB ), - } ); - + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ), + + Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9989, Tightening::UB ), + Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ), + Tightening( 13, -2.9989, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ), + + Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9972, Tightening::UB ), + Tightening( 15, 0.0024, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ), + + Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9972, 
Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { - } ); - + + List expectedBounds4( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_relu_and_bilinear() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -11928,46 +11916,44 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, 0 ); tableau.setUpperBound( 1, 1 ); - + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + List expectedBounds( { + Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), 
+ + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ), + + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ), + + Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ), + } ); - Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ), - - Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ), - } ); - List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { - } ); - + + List expectedBounds2( {} ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -12000,46 +11986,47 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB 
), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + List expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), - Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), + + Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), + + Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), + } ); - Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), - - Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), - } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { Tightening( 7, 0, Tightening::LB ), - } ); - + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_backwards_relu_and_bilinear2() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" 
); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-converge" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -12051,51 +12038,51 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 1, 1 ); tableau.setLowerBound( 2, -1 ); tableau.setUpperBound( 2, 1 ); - + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ), - Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), - - Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), - } ); - + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), 
Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ), + Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), + + Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), + } ); + List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( - { Tightening( 13, 0, Tightening::LB ), - Tightening( 14, 0, Tightening::LB ), - } ); - + + List expectedBounds2( { + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -12131,57 +12118,58 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 14, large ); tableau.setLowerBound( 15, -large ); tableau.setUpperBound( 15, large ); - + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, 
Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), - - Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), - Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), - - Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), - } ); - + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), + Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), + + Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds 
) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( - { Tightening( 7, 0, Tightening::LB ), - Tightening( 13, 0, Tightening::LB ), - Tightening( 14, 0, Tightening::LB ), - } ); - + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_preimage_approximation_leaky_relu() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, "backward-preimage-approx" ); - + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); @@ -12196,44 +12184,59 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( - { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - 
Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), - } ); - List bounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - + + // Invoke PreimageApproximation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List polygonal_bounds; - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintPolygonalTightenings( polygonal_bounds ) ); - for ( auto &bound : polygonal_bounds ) + + /*List expectedBounds2( + { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2.1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 
8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.35, Tightening::UB ), + Tightening( 11, 1.35, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) { bound.dump(); } - /*TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );*/ + + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); @@ -12266,64 +12269,66 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( - { Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), - Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), - Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + 
Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); - Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), - Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), - } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - + + // Invoke backward LP propagation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( + + /*List expectedBounds4( { Tightening( 4, -0.5, Tightening::LB ), Tightening( 5, -0.4, Tightening::LB ), Tightening( 8, -0.4571, Tightening::LB ), Tightening( 9, -1.1057, Tightening::LB ), } ); - + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );*/ } - + bool boundsEqual( const List &bounds, const List &expectedBounds ) { if ( bounds.size() != expectedBounds.size() ) return false; - + bool allFound = true; for ( const auto &bound : bounds ) { bool currentFound = false; for ( const auto &expectedBound : expectedBounds ) { - currentFound |= ( bound._type == expectedBound._type && bound._variable == expectedBound._variable && - FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) ); + currentFound |= + ( bound._type == expectedBound._type && + bound._variable == expectedBound._variable && + FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) ); } allFound &= currentFound; } return allFound; } - + void updateTableau( MockTableau &tableau, List &tightenings ) { ASSERT( tableau ); From 
625a5b85ba5d2740cae7af430679bb1acebbc404 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 12 Dec 2024 21:01:22 +0200 Subject: [PATCH 36/72] Add files via upload --- src/configuration/GlobalConfiguration.cpp | 3 ++- src/configuration/GlobalConfiguration.h | 10 +++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp index 1a7d133f40..3a08b92234 100644 --- a/src/configuration/GlobalConfiguration.cpp +++ b/src/configuration/GlobalConfiguration.cpp @@ -70,8 +70,9 @@ const unsigned GlobalConfiguration::SIMULATION_RANDOM_SEED = 1; const unsigned GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED = 1; const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED = 1; const unsigned GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS = 1000; -const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS = 1000; +const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS = 100; const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE = 0.1; +const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE = 0.05; const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true; diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h index a779b93937..78ab9f662e 100644 --- a/src/configuration/GlobalConfiguration.h +++ b/src/configuration/GlobalConfiguration.h @@ -150,19 +150,19 @@ class GlobalConfiguration // Random seed for generating simulation values. static const unsigned SIMULATION_RANDOM_SEED; - + // Random seed for EstimateVolume procedure (PreimageApproximation). static const unsigned VOLUME_ESTIMATION_RANDOM_SEED; - + // Number of iterations for EstimateVolume procedure (PreimageApproximation). static const unsigned VOLUME_ESTIMATION_ITERATIONS; - + // Random seed for PreimageApproximation optimization. 
static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED; - + // Maximum iterations for PreimageApproximation optimization. static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; - + // Step size for PreimageApproximation optimization. static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; From 745cc957d22df8b4ae35a5f75c1324aff6cd43cb Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 9 Jan 2025 20:51:10 +0200 Subject: [PATCH 37/72] Add files via upload --- src/nlr/GlobalConfiguration.cpp | 238 ++++++++++++++++++++++++ src/nlr/GlobalConfiguration.h | 318 ++++++++++++++++++++++++++++++++ 2 files changed, 556 insertions(+) create mode 100644 src/nlr/GlobalConfiguration.cpp create mode 100644 src/nlr/GlobalConfiguration.h diff --git a/src/nlr/GlobalConfiguration.cpp b/src/nlr/GlobalConfiguration.cpp new file mode 100644 index 0000000000..a5d5100011 --- /dev/null +++ b/src/nlr/GlobalConfiguration.cpp @@ -0,0 +1,238 @@ +/********************* */ +/*! \file GlobalConfiguration.cpp + ** \verbatim + ** Top contributors (to current version): + ** Guy Katz, Parth Shah, Derek Huang + ** This file is part of the Marabou project. + ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS + ** in the top-level source directory) and their institutional affiliations. + ** All rights reserved. See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** [[ Add lengthier description here ]] + + **/ + +#include "GlobalConfiguration.h" + +#include "DivideStrategy.h" +#include "MString.h" + +#include + + +// The exponential moving average is calculated as +// ema = current * alpha + previous * (1 - alpha) +const double GlobalConfiguration::EXPONENTIAL_MOVING_AVERAGE_ALPHA = 0.5; + +// Whether to use SoI instead of Reluplex for local search for satisfying assignments +// to non-linear constraint. 
+bool GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH = true; + +const double GlobalConfiguration::SCORE_BUMP_FOR_PL_CONSTRAINTS_NOT_IN_SOI = 5; + +// Use the polarity metrics to decide which branch to take first in a case split +// and how to repair a ReLU constraint. +const bool GlobalConfiguration::USE_POLARITY_BASED_DIRECTION_HEURISTICS = true; + +const double GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS = 0.0000000001; +const unsigned GlobalConfiguration::DEFAULT_DOUBLE_TO_STRING_PRECISION = 10; +const unsigned GlobalConfiguration::STATISTICS_PRINTING_FREQUENCY = 10000; +const unsigned GlobalConfiguration::STATISTICS_PRINTING_FREQUENCY_GUROBI = 100; +const double GlobalConfiguration::BOUND_COMPARISON_ADDITIVE_TOLERANCE = 0.0000001; +const double GlobalConfiguration::BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE = 0.001 * 0.0000001; +const double GlobalConfiguration::PIVOT_CHANGE_COLUMN_TOLERANCE = 0.000000001; +const double GlobalConfiguration::PIVOT_ROW_AND_COLUMN_TOLERANCE = 0.01; +const double GlobalConfiguration::ENTRY_ELIGIBILITY_TOLERANCE = 0.00000001; +const double GlobalConfiguration::RATIO_CONSTRAINT_ADDITIVE_TOLERANCE = 0.0000001 * 0.3; +const double GlobalConfiguration::RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE = + 0.001 * 0.0000001 * 0.3; +const double GlobalConfiguration::HARRIS_RATIO_CONSTRAINT_ADDITIVE_TOLERANCE = 0.0000001 * 0.5; +const double GlobalConfiguration::HARRIS_RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE = + 0.001 * 0.0000001 * 0.5; +const double GlobalConfiguration::BASIC_COSTS_ADDITIVE_TOLERANCE = 0.0000001; +const double GlobalConfiguration::BASIC_COSTS_MULTIPLICATIVE_TOLERANCE = 0.001 * 0.0000001; +const double GlobalConfiguration::SPARSE_FORREST_TOMLIN_DIAGONAL_ELEMENT_TOLERANCE = 0.00001; +const unsigned GlobalConfiguration::DEGRADATION_CHECKING_FREQUENCY = 100; +const double GlobalConfiguration::DEGRADATION_THRESHOLD = 0.1; +const double GlobalConfiguration::ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD = 0.0001; +const bool 
GlobalConfiguration::USE_COLUMN_MERGING_EQUATIONS = false; +const double GlobalConfiguration::GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD = 0.1; +const unsigned GlobalConfiguration::MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS = 5; +const DivideStrategy GlobalConfiguration::SPLITTING_HEURISTICS = DivideStrategy::ReLUViolation; +const unsigned GlobalConfiguration::INTERVAL_SPLITTING_FREQUENCY = 10; +const unsigned GlobalConfiguration::INTERVAL_SPLITTING_THRESHOLD = 10; +const unsigned GlobalConfiguration::BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY = 100; +const unsigned GlobalConfiguration::ROW_BOUND_TIGHTENER_SATURATION_ITERATIONS = 20; +const double GlobalConfiguration::COST_FUNCTION_ERROR_THRESHOLD = 0.0000000001; + +const unsigned GlobalConfiguration::SIMULATION_RANDOM_SEED = 1; +const unsigned GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED = 1; +const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED = 1; +const unsigned GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS = 25000; +const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS = 25; +const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE = 0.025; +const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE = 0.25; + +const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true; + +const double GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT = 0.00000000001; +const double GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT = 0.00000001; + +const double GlobalConfiguration::SIGMOID_CUTOFF_CONSTANT = 20; + +const bool GlobalConfiguration::PREPROCESS_INPUT_QUERY = true; +const bool GlobalConfiguration::PREPROCESSOR_ELIMINATE_VARIABLES = true; +const bool GlobalConfiguration::PL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING = true; +const bool GlobalConfiguration::NL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING = true; +const double 
GlobalConfiguration::PREPROCESSOR_ALMOST_FIXED_THRESHOLD = 0.00001; + +const unsigned GlobalConfiguration::PREPROCESSSING_MAX_TIGHTEING_ROUND = 10; + +const bool GlobalConfiguration::WARM_START = false; + +const unsigned GlobalConfiguration::MAX_ITERATIONS_WITHOUT_PROGRESS = 10000; + +const unsigned GlobalConfiguration::PSE_ITERATIONS_BEFORE_RESET = 1000; +const double GlobalConfiguration::PSE_GAMMA_ERROR_THRESHOLD = 0.001; +const double GlobalConfiguration::PSE_GAMMA_UPDATE_TOLERANCE = 0.000000001; + +const double GlobalConfiguration::CONSTRAINT_COMPARISON_TOLERANCE = 0.00001; + +const double GlobalConfiguration::SOFTMAX_LSE2_THRESHOLD = 0.6; + +const bool GlobalConfiguration::ONLY_AUX_INITIAL_BASIS = false; + +const GlobalConfiguration::ExplicitBasisBoundTighteningType + GlobalConfiguration::EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE = + GlobalConfiguration::COMPUTE_INVERTED_BASIS_MATRIX; +const bool GlobalConfiguration::EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION = false; +const double GlobalConfiguration::EXPLICIT_BASIS_BOUND_TIGHTENING_ROUNDING_CONSTANT = 1e-6; + +const unsigned GlobalConfiguration::REFACTORIZATION_THRESHOLD = 100; +const GlobalConfiguration::BasisFactorizationType GlobalConfiguration::BASIS_FACTORIZATION_TYPE = + GlobalConfiguration::SPARSE_FORREST_TOMLIN_FACTORIZATION; + +const unsigned GlobalConfiguration::POLARITY_CANDIDATES_THRESHOLD = 5; + +const unsigned GlobalConfiguration::DNC_DEPTH_THRESHOLD = 5; + +const double GlobalConfiguration::MINIMAL_COEFFICIENT_FOR_TIGHTENING = 0.01; +const double GlobalConfiguration::LEMMA_CERTIFICATION_TOLERANCE = 0.000001; +const bool GlobalConfiguration::WRITE_JSON_PROOF = false; + +const unsigned GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH = 3; +const unsigned GlobalConfiguration::MAX_ROUNDS_OF_BACKWARD_ANALYSIS = 10; + +#ifdef ENABLE_GUROBI +const unsigned GlobalConfiguration::GUROBI_NUMBER_OF_THREADS = 1; +const bool GlobalConfiguration::GUROBI_LOGGING = false; +#endif // ENABLE_GUROBI + +// Logging 
- note that it is enabled only in Debug mode +const bool GlobalConfiguration::DNC_MANAGER_LOGGING = false; +const bool GlobalConfiguration::ENGINE_LOGGING = false; +const bool GlobalConfiguration::TABLEAU_LOGGING = false; +const bool GlobalConfiguration::SMT_CORE_LOGGING = false; +const bool GlobalConfiguration::DANTZIGS_RULE_LOGGING = false; +const bool GlobalConfiguration::BASIS_FACTORIZATION_LOGGING = false; +const bool GlobalConfiguration::PREPROCESSOR_LOGGING = false; +const bool GlobalConfiguration::INPUT_QUERY_LOGGING = false; +const bool GlobalConfiguration::PROJECTED_STEEPEST_EDGE_LOGGING = false; +const bool GlobalConfiguration::GAUSSIAN_ELIMINATION_LOGGING = false; +const bool GlobalConfiguration::QUERY_LOADER_LOGGING = false; +const bool GlobalConfiguration::SYMBOLIC_BOUND_TIGHTENER_LOGGING = false; +const bool GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING = false; +const bool GlobalConfiguration::MPS_PARSER_LOGGING = false; +const bool GlobalConfiguration::ONNX_PARSER_LOGGING = false; +const bool GlobalConfiguration::SOI_LOGGING = false; +const bool GlobalConfiguration::SCORE_TRACKER_LOGGING = false; +const bool GlobalConfiguration::CEGAR_LOGGING = false; + +const bool GlobalConfiguration::USE_SMART_FIX = false; +const bool GlobalConfiguration::USE_LEAST_FIX = false; + +void GlobalConfiguration::print() +{ + printf( "****************************\n" ); + printf( "*** Global Configuraiton ***\n" ); + printf( "****************************\n" ); + printf( " DEFAULT_EPSILON_FOR_COMPARISONS: %.15lf\n", DEFAULT_EPSILON_FOR_COMPARISONS ); + printf( " DEFAULT_DOUBLE_TO_STRING_PRECISION: %u\n", DEFAULT_DOUBLE_TO_STRING_PRECISION ); + printf( " STATISTICS_PRINTING_FREQUENCY: %u\n", STATISTICS_PRINTING_FREQUENCY ); + printf( " BOUND_COMPARISON_ADDITIVE_TOLERANCE: %.15lf\n", + BOUND_COMPARISON_ADDITIVE_TOLERANCE ); + printf( " BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE: %.15lf\n", + BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE ); + printf( " 
PIVOT_CHANGE_COLUMN_TOLERANCE: %.15lf\n", PIVOT_CHANGE_COLUMN_TOLERANCE ); + printf( " RATIO_CONSTRAINT_ADDITIVE_TOLERANCE: %.15lf\n", + RATIO_CONSTRAINT_ADDITIVE_TOLERANCE ); + printf( " RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE: %.15lf\n", + RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE ); + printf( " BASIC_COSTS_ADDITIVE_TOLERANCE: %.15lf\n", BASIC_COSTS_ADDITIVE_TOLERANCE ); + printf( " BASIC_COSTS_MULTIPLICATIVE_TOLERANCE: %.15lf\n", + BASIC_COSTS_MULTIPLICATIVE_TOLERANCE ); + printf( " DEGRADATION_CHECKING_FREQUENCY: %u\n", DEGRADATION_CHECKING_FREQUENCY ); + printf( " DEGRADATION_THRESHOLD: %.15lf\n", DEGRADATION_THRESHOLD ); + printf( " ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD: %.15lf\n", ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD ); + printf( " USE_COLUMN_MERGING_EQUATIONS: %s\n", USE_COLUMN_MERGING_EQUATIONS ? "Yes" : "No" ); + printf( " GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD: %.15lf\n", + GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD ); + printf( " MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS: %u\n", MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS ); + printf( " BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY: %u\n", + BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY ); + printf( " COST_FUNCTION_ERROR_THRESHOLD: %.15lf\n", COST_FUNCTION_ERROR_THRESHOLD ); + printf( " USE_HARRIS_RATIO_TEST: %s\n", USE_HARRIS_RATIO_TEST ? "Yes" : "No" ); + + printf( " PREPROCESS_INPUT_QUERY: %s\n", PREPROCESS_INPUT_QUERY ? "Yes" : "No" ); + printf( " PREPROCESSOR_ELIMINATE_VARIABLES: %s\n", + PREPROCESSOR_ELIMINATE_VARIABLES ? 
"Yes" : "No" ); + printf( " PSE_ITERATIONS_BEFORE_RESET: %u\n", PSE_ITERATIONS_BEFORE_RESET ); + printf( " PSE_GAMMA_ERROR_THRESHOLD: %.15lf\n", PSE_GAMMA_ERROR_THRESHOLD ); + printf( " CONSTRAINT_COMPARISON_TOLERANCE: %.15lf\n", CONSTRAINT_COMPARISON_TOLERANCE ); + + String basisBoundTighteningType; + switch ( EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE ) + { + case COMPUTE_INVERTED_BASIS_MATRIX: + basisBoundTighteningType = "Compute inverted basis matrix"; + break; + + case USE_IMPLICIT_INVERTED_BASIS_MATRIX: + basisBoundTighteningType = "Use implicit inverted basis matrix"; + break; + + default: + basisBoundTighteningType = "Unknown"; + break; + } + + printf( " EXPLICIT_BASIS_BOUND_TIGHTENING_INVERT_BASIS: %s\n", + basisBoundTighteningType.ascii() ); + printf( " EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION: %s\n", + EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION ? "Yes" : "No" ); + printf( " REFACTORIZATION_THRESHOLD: %u\n", REFACTORIZATION_THRESHOLD ); + + String basisFactorizationType; + if ( GlobalConfiguration::BASIS_FACTORIZATION_TYPE == GlobalConfiguration::LU_FACTORIZATION ) + basisFactorizationType = "LU_FACTORIZATION"; + else if ( GlobalConfiguration::BASIS_FACTORIZATION_TYPE == + GlobalConfiguration::SPARSE_LU_FACTORIZATION ) + basisFactorizationType = "SPARSE_LU_FACTORIZATION"; + else if ( GlobalConfiguration::BASIS_FACTORIZATION_TYPE == + GlobalConfiguration::FORREST_TOMLIN_FACTORIZATION ) + basisFactorizationType = "FORREST_TOMLIN_FACTORIZATION"; + else + basisFactorizationType = "Unknown"; + + printf( " BASIS_FACTORIZATION_TYPE: %s\n", basisFactorizationType.ascii() ); + printf( "****************************\n" ); +} + +// +// Local Variables: +// compile-command: "make -C ../.. 
" +// tags-file-name: "../../TAGS" +// c-basic-offset: 4 +// End: +// diff --git a/src/nlr/GlobalConfiguration.h b/src/nlr/GlobalConfiguration.h new file mode 100644 index 0000000000..92dcfdd049 --- /dev/null +++ b/src/nlr/GlobalConfiguration.h @@ -0,0 +1,318 @@ +/********************* */ +/*! \file GlobalConfiguration.h + ** \verbatim + ** Top contributors (to current version): + ** Guy Katz, Parth Shah, Derek Huang + ** This file is part of the Marabou project. + ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS + ** in the top-level source directory) and their institutional affiliations. + ** All rights reserved. See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** [[ Add lengthier description here ]] + + **/ + +#ifndef __GlobalConfiguration_h__ +#define __GlobalConfiguration_h__ + +#include "DivideStrategy.h" + +class GlobalConfiguration +{ +public: + static void print(); + + // The exponential moving average is calculated as + // ema = current * alpha + previous * (1 - alpha) + static const double EXPONENTIAL_MOVING_AVERAGE_ALPHA; + + // Whether to use SoI instead of Reluplex for local search for satisfying assignments + // to non-linear constraint. + static bool USE_DEEPSOI_LOCAL_SEARCH; + + // The quantity by which the score is bumped up for PLContraints not + // participating in the SoI. This promotes those constraints in the branching + // order. + static const double SCORE_BUMP_FOR_PL_CONSTRAINTS_NOT_IN_SOI; + + // Use the polarity metrics to decide which branch to take first in a case split + // and how to repair a ReLU constraint. + static const bool USE_POLARITY_BASED_DIRECTION_HEURISTICS; + + // The default epsilon used for comparing doubles + static const double DEFAULT_EPSILON_FOR_COMPARISONS; + + // The precision level when convering doubles to strings + static const unsigned DEFAULT_DOUBLE_TO_STRING_PRECISION; + + // How often should the main loop print statistics? 
+ static const unsigned STATISTICS_PRINTING_FREQUENCY; + static const unsigned STATISTICS_PRINTING_FREQUENCY_GUROBI; + + // Tolerance when checking whether the value computed for a basic variable is out of bounds + static const double BOUND_COMPARISON_ADDITIVE_TOLERANCE; + static const double BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE; + + // Tolerance when checking whether a basic variable depends on a non-basic variable, by looking + // at the change column, as part of a pivot operation. + static const double PIVOT_CHANGE_COLUMN_TOLERANCE; + + // Tolerance for the difference when computing the pivot entry by column and by row + static const double PIVOT_ROW_AND_COLUMN_TOLERANCE; + + // Tolerance when checking whether a non-basic variable is eligible for being selected as the + // entering variable, by its reduced cost + static const double ENTRY_ELIGIBILITY_TOLERANCE; + + // Ratio test tolerance constants + static const double RATIO_CONSTRAINT_ADDITIVE_TOLERANCE; + static const double RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE; + static const double HARRIS_RATIO_CONSTRAINT_ADDITIVE_TOLERANCE; + static const double HARRIS_RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE; + + // Cost function tolerance constants + static const double BASIC_COSTS_ADDITIVE_TOLERANCE; + static const double BASIC_COSTS_MULTIPLICATIVE_TOLERANCE; + + // Sparse ForrestTomlin diagonal element tolerance constant + static const double SPARSE_FORREST_TOMLIN_DIAGONAL_ELEMENT_TOLERANCE; + + // Toggle use of Harris' two-pass ratio test for selecting the leaving variable + static const bool USE_HARRIS_RATIO_TEST; + + // Toggle query-preprocessing on/off. + static const bool PREPROCESS_INPUT_QUERY; + + // Assuming the preprocessor is on, toggle whether or not it will attempt to perform variable + // elimination. + static const bool PREPROCESSOR_ELIMINATE_VARIABLES; + + // Toggle whether or not PL/NL constraints will be called upon + // to add auxiliary variables and equations after preprocessing. 
+ static const bool PL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING; + static const bool NL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING; + + // If the difference between a variable's lower and upper bounds is smaller than this + // threshold, the preprocessor will treat it as fixed. + static const double PREPROCESSOR_ALMOST_FIXED_THRESHOLD; + + // Maximal rounds of tightening to perform in the preprocessor to avoid non-termination. + static const unsigned PREPROCESSSING_MAX_TIGHTEING_ROUND; + + // Try to set the initial tableau assignment to an assignment that is legal with + // respect to the input network. + static const bool WARM_START; + + // The maximal number of iterations without new tree states being visited, before + // the engine performs a precision restoration. + static const unsigned MAX_ITERATIONS_WITHOUT_PROGRESS; + + // How often should the main loop check the current degradation? + static const unsigned DEGRADATION_CHECKING_FREQUENCY; + + // The threshold of degradation above which restoration is required + static const double DEGRADATION_THRESHOLD; + + // If a pivot element in a simplex iteration is smaller than this threshold, the engine will + // attempt to pick another element. + static const double ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD; + + // If true, column-merging equations are given special treatment and cause columns in the + // tableau to be merged (instead of a new row added). + static const bool USE_COLUMN_MERGING_EQUATIONS; + + // If a pivot element in a Gaussian elimination iteration is smaller than this threshold times + // the largest element in the column, the elimination engine will attempt to pick another pivot. + static const double GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD; + + // How many potential pivots should the engine inspect (at most) in every simplex iteration? 
+ static const unsigned MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS; + + static const DivideStrategy SPLITTING_HEURISTICS; + + // The frequency to use interval splitting when largest interval splitting strategy is in use. + static const unsigned INTERVAL_SPLITTING_FREQUENCY; + + // When automatically deciding which splitting strategy to use, we use relu-splitting if + // the number of inputs is larger than this number. + static const unsigned INTERVAL_SPLITTING_THRESHOLD; + + // How often should we perform full bound tightening, on the entire contraints matrix A. + static const unsigned BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY; + + // When the row bound tightener is asked to run until saturation, it can enter an infinite loop + // due to tiny increments in bounds. This number limits the number of iterations it can perform. + static const unsigned ROW_BOUND_TIGHTENER_SATURATION_ITERATIONS; + + // If the cost function error exceeds this threshold, it is recomputed + static const double COST_FUNCTION_ERROR_THRESHOLD; + + // Random seed for generating simulation values. + static const unsigned SIMULATION_RANDOM_SEED; + + // Random seed for EstimateVolume procedure (PreimageApproximation). + static const unsigned VOLUME_ESTIMATION_RANDOM_SEED; + + // Number of iterations for EstimateVolume procedure (PreimageApproximation). + static const unsigned VOLUME_ESTIMATION_ITERATIONS; + + // Random seed for PreimageApproximation optimization. + static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED; + + // Maximum iterations for PreimageApproximation optimization. + static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; + + // Step size for PreimageApproximation optimization. + static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; + + // Learning rate for PreimageApproximation optimization. 
+ static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; + + // How often should projected steepest edge reset the reference space? + static const unsigned PSE_ITERATIONS_BEFORE_RESET; + + // An error threshold which, when crossed, causes projected steepest edge to reset the reference + // space + static const double PSE_GAMMA_ERROR_THRESHOLD; + + // PSE's Gamma function's update tolerance + static const double PSE_GAMMA_UPDATE_TOLERANCE; + + // The tolerance for checking whether f = Constraint( b ), Constraint \in { ReLU, ABS, Sign} + static const double CONSTRAINT_COMPARISON_TOLERANCE; + + // Toggle between two types of LSE lower bound for softmax + static const double SOFTMAX_LSE2_THRESHOLD; + + // Should the initial basis be comprised only of auxiliary (row) variables? + static const bool ONLY_AUX_INITIAL_BASIS; + + /* + Explicit (Reluplex-style) bound tightening options + */ + + enum ExplicitBasisBoundTighteningType { + // Compute the inverse basis matrix and use it + COMPUTE_INVERTED_BASIS_MATRIX = 0, + // Use the inverted basis matrix without using it, via transformations + USE_IMPLICIT_INVERTED_BASIS_MATRIX = 1, + // Disable explicit basis bound tightening + DISABLE_EXPLICIT_BASIS_TIGHTENING = 2, + }; + + // When doing bound tightening using the explicit basis matrix, should the basis matrix be + // inverted? + static const ExplicitBasisBoundTighteningType EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE; + static const double EXPLICIT_BASIS_BOUND_TIGHTENING_ROUNDING_CONSTANT; + + // When doing explicit bound tightening, should we repeat until saturation? 
+ static const bool EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION; + + /* + Symbolic bound tightening options + */ + + // Symbolic tightening, LP rounding constants + static const double SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT; + static const double LP_TIGHTENING_ROUNDING_CONSTANT; + + static const double SIGMOID_CUTOFF_CONSTANT; + + /* + Constraint fixing heuristics + */ + + // When a PL constraint proposes a fix that affects multiple variables, should it first query + // for any relevant linear connections between the variables? + static const bool USE_SMART_FIX; + + // A heuristic for selecting which of the broken PL constraints will be repaired next. In this + // case, the one that has been repaired the least number of times so far. + static const bool USE_LEAST_FIX; + + /* + Basis factorization options + */ + + // The number of accumualted eta matrices, after which the basis will be refactorized + static const unsigned REFACTORIZATION_THRESHOLD; + + // The kind of basis factorization algorithm in use + enum BasisFactorizationType { + LU_FACTORIZATION, + SPARSE_LU_FACTORIZATION, + FORREST_TOMLIN_FACTORIZATION, + SPARSE_FORREST_TOMLIN_FACTORIZATION, + }; + static const BasisFactorizationType BASIS_FACTORIZATION_TYPE; + + /* In the polarity-based branching heuristics, only this many earliest nodes + are considered to branch on. + */ + static const unsigned POLARITY_CANDIDATES_THRESHOLD; + + /* The max number of DnC splits + */ + static const unsigned DNC_DEPTH_THRESHOLD; + + /* Minimal coefficient of a variable in a Tableau row, that is used for bound tightening + */ + static const double MINIMAL_COEFFICIENT_FOR_TIGHTENING; + + /* The tolerance of errors when checking lemmas in the proof-checking process + */ + static const double LEMMA_CERTIFICATION_TOLERANCE; + + /* Denote whether proofs should be written as a JSON file + */ + static const bool WRITE_JSON_PROOF; + + /* How many layers after the current layer do we encode in backward analysis. 
+ */ + static const unsigned BACKWARD_BOUND_PROPAGATION_DEPTH; + + /* How many rounds of backward analysis to perform? + */ + static const unsigned MAX_ROUNDS_OF_BACKWARD_ANALYSIS; + +#ifdef ENABLE_GUROBI + /* + The number of threads Gurobi spawns + */ + static const unsigned GUROBI_NUMBER_OF_THREADS; + static const bool GUROBI_LOGGING; +#endif // ENABLE_GUROBI + + /* + Logging options + */ + static const bool DNC_MANAGER_LOGGING; + static const bool ENGINE_LOGGING; + static const bool TABLEAU_LOGGING; + static const bool SMT_CORE_LOGGING; + static const bool DANTZIGS_RULE_LOGGING; + static const bool BASIS_FACTORIZATION_LOGGING; + static const bool PREPROCESSOR_LOGGING; + static const bool INPUT_QUERY_LOGGING; + static const bool PROJECTED_STEEPEST_EDGE_LOGGING; + static const bool GAUSSIAN_ELIMINATION_LOGGING; + static const bool QUERY_LOADER_LOGGING; + static const bool SYMBOLIC_BOUND_TIGHTENER_LOGGING; + static const bool NETWORK_LEVEL_REASONER_LOGGING; + static const bool MPS_PARSER_LOGGING; + static const bool ONNX_PARSER_LOGGING; + static const bool SOI_LOGGING; + static const bool SCORE_TRACKER_LOGGING; + static const bool CEGAR_LOGGING; +}; + +#endif // __GlobalConfiguration_h__ + +// +// Local Variables: +// compile-command: "make -C .. " +// tags-file-name: "../TAGS" +// c-basic-offset: 4 +// End: +// From 2d77fb97fe340a72f7376f9d922721e0b86919b9 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 9 Jan 2025 20:53:01 +0200 Subject: [PATCH 38/72] Delete src/nlr/GlobalConfiguration.cpp --- src/nlr/GlobalConfiguration.cpp | 238 -------------------------------- 1 file changed, 238 deletions(-) delete mode 100644 src/nlr/GlobalConfiguration.cpp diff --git a/src/nlr/GlobalConfiguration.cpp b/src/nlr/GlobalConfiguration.cpp deleted file mode 100644 index a5d5100011..0000000000 --- a/src/nlr/GlobalConfiguration.cpp +++ /dev/null @@ -1,238 +0,0 @@ -/********************* */ -/*! 
\file GlobalConfiguration.cpp - ** \verbatim - ** Top contributors (to current version): - ** Guy Katz, Parth Shah, Derek Huang - ** This file is part of the Marabou project. - ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS - ** in the top-level source directory) and their institutional affiliations. - ** All rights reserved. See the file COPYING in the top-level source - ** directory for licensing information.\endverbatim - ** - ** [[ Add lengthier description here ]] - - **/ - -#include "GlobalConfiguration.h" - -#include "DivideStrategy.h" -#include "MString.h" - -#include - - -// The exponential moving average is calculated as -// ema = current * alpha + previous * (1 - alpha) -const double GlobalConfiguration::EXPONENTIAL_MOVING_AVERAGE_ALPHA = 0.5; - -// Whether to use SoI instead of Reluplex for local search for satisfying assignments -// to non-linear constraint. -bool GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH = true; - -const double GlobalConfiguration::SCORE_BUMP_FOR_PL_CONSTRAINTS_NOT_IN_SOI = 5; - -// Use the polarity metrics to decide which branch to take first in a case split -// and how to repair a ReLU constraint. 
-const bool GlobalConfiguration::USE_POLARITY_BASED_DIRECTION_HEURISTICS = true; - -const double GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS = 0.0000000001; -const unsigned GlobalConfiguration::DEFAULT_DOUBLE_TO_STRING_PRECISION = 10; -const unsigned GlobalConfiguration::STATISTICS_PRINTING_FREQUENCY = 10000; -const unsigned GlobalConfiguration::STATISTICS_PRINTING_FREQUENCY_GUROBI = 100; -const double GlobalConfiguration::BOUND_COMPARISON_ADDITIVE_TOLERANCE = 0.0000001; -const double GlobalConfiguration::BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE = 0.001 * 0.0000001; -const double GlobalConfiguration::PIVOT_CHANGE_COLUMN_TOLERANCE = 0.000000001; -const double GlobalConfiguration::PIVOT_ROW_AND_COLUMN_TOLERANCE = 0.01; -const double GlobalConfiguration::ENTRY_ELIGIBILITY_TOLERANCE = 0.00000001; -const double GlobalConfiguration::RATIO_CONSTRAINT_ADDITIVE_TOLERANCE = 0.0000001 * 0.3; -const double GlobalConfiguration::RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE = - 0.001 * 0.0000001 * 0.3; -const double GlobalConfiguration::HARRIS_RATIO_CONSTRAINT_ADDITIVE_TOLERANCE = 0.0000001 * 0.5; -const double GlobalConfiguration::HARRIS_RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE = - 0.001 * 0.0000001 * 0.5; -const double GlobalConfiguration::BASIC_COSTS_ADDITIVE_TOLERANCE = 0.0000001; -const double GlobalConfiguration::BASIC_COSTS_MULTIPLICATIVE_TOLERANCE = 0.001 * 0.0000001; -const double GlobalConfiguration::SPARSE_FORREST_TOMLIN_DIAGONAL_ELEMENT_TOLERANCE = 0.00001; -const unsigned GlobalConfiguration::DEGRADATION_CHECKING_FREQUENCY = 100; -const double GlobalConfiguration::DEGRADATION_THRESHOLD = 0.1; -const double GlobalConfiguration::ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD = 0.0001; -const bool GlobalConfiguration::USE_COLUMN_MERGING_EQUATIONS = false; -const double GlobalConfiguration::GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD = 0.1; -const unsigned GlobalConfiguration::MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS = 5; -const DivideStrategy 
GlobalConfiguration::SPLITTING_HEURISTICS = DivideStrategy::ReLUViolation; -const unsigned GlobalConfiguration::INTERVAL_SPLITTING_FREQUENCY = 10; -const unsigned GlobalConfiguration::INTERVAL_SPLITTING_THRESHOLD = 10; -const unsigned GlobalConfiguration::BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY = 100; -const unsigned GlobalConfiguration::ROW_BOUND_TIGHTENER_SATURATION_ITERATIONS = 20; -const double GlobalConfiguration::COST_FUNCTION_ERROR_THRESHOLD = 0.0000000001; - -const unsigned GlobalConfiguration::SIMULATION_RANDOM_SEED = 1; -const unsigned GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED = 1; -const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED = 1; -const unsigned GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS = 25000; -const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS = 25; -const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE = 0.025; -const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE = 0.25; - -const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true; - -const double GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT = 0.00000000001; -const double GlobalConfiguration::LP_TIGHTENING_ROUNDING_CONSTANT = 0.00000001; - -const double GlobalConfiguration::SIGMOID_CUTOFF_CONSTANT = 20; - -const bool GlobalConfiguration::PREPROCESS_INPUT_QUERY = true; -const bool GlobalConfiguration::PREPROCESSOR_ELIMINATE_VARIABLES = true; -const bool GlobalConfiguration::PL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING = true; -const bool GlobalConfiguration::NL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING = true; -const double GlobalConfiguration::PREPROCESSOR_ALMOST_FIXED_THRESHOLD = 0.00001; - -const unsigned GlobalConfiguration::PREPROCESSSING_MAX_TIGHTEING_ROUND = 10; - -const bool GlobalConfiguration::WARM_START = false; - -const unsigned GlobalConfiguration::MAX_ITERATIONS_WITHOUT_PROGRESS = 10000; - -const 
unsigned GlobalConfiguration::PSE_ITERATIONS_BEFORE_RESET = 1000; -const double GlobalConfiguration::PSE_GAMMA_ERROR_THRESHOLD = 0.001; -const double GlobalConfiguration::PSE_GAMMA_UPDATE_TOLERANCE = 0.000000001; - -const double GlobalConfiguration::CONSTRAINT_COMPARISON_TOLERANCE = 0.00001; - -const double GlobalConfiguration::SOFTMAX_LSE2_THRESHOLD = 0.6; - -const bool GlobalConfiguration::ONLY_AUX_INITIAL_BASIS = false; - -const GlobalConfiguration::ExplicitBasisBoundTighteningType - GlobalConfiguration::EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE = - GlobalConfiguration::COMPUTE_INVERTED_BASIS_MATRIX; -const bool GlobalConfiguration::EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION = false; -const double GlobalConfiguration::EXPLICIT_BASIS_BOUND_TIGHTENING_ROUNDING_CONSTANT = 1e-6; - -const unsigned GlobalConfiguration::REFACTORIZATION_THRESHOLD = 100; -const GlobalConfiguration::BasisFactorizationType GlobalConfiguration::BASIS_FACTORIZATION_TYPE = - GlobalConfiguration::SPARSE_FORREST_TOMLIN_FACTORIZATION; - -const unsigned GlobalConfiguration::POLARITY_CANDIDATES_THRESHOLD = 5; - -const unsigned GlobalConfiguration::DNC_DEPTH_THRESHOLD = 5; - -const double GlobalConfiguration::MINIMAL_COEFFICIENT_FOR_TIGHTENING = 0.01; -const double GlobalConfiguration::LEMMA_CERTIFICATION_TOLERANCE = 0.000001; -const bool GlobalConfiguration::WRITE_JSON_PROOF = false; - -const unsigned GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH = 3; -const unsigned GlobalConfiguration::MAX_ROUNDS_OF_BACKWARD_ANALYSIS = 10; - -#ifdef ENABLE_GUROBI -const unsigned GlobalConfiguration::GUROBI_NUMBER_OF_THREADS = 1; -const bool GlobalConfiguration::GUROBI_LOGGING = false; -#endif // ENABLE_GUROBI - -// Logging - note that it is enabled only in Debug mode -const bool GlobalConfiguration::DNC_MANAGER_LOGGING = false; -const bool GlobalConfiguration::ENGINE_LOGGING = false; -const bool GlobalConfiguration::TABLEAU_LOGGING = false; -const bool GlobalConfiguration::SMT_CORE_LOGGING = false; -const 
bool GlobalConfiguration::DANTZIGS_RULE_LOGGING = false; -const bool GlobalConfiguration::BASIS_FACTORIZATION_LOGGING = false; -const bool GlobalConfiguration::PREPROCESSOR_LOGGING = false; -const bool GlobalConfiguration::INPUT_QUERY_LOGGING = false; -const bool GlobalConfiguration::PROJECTED_STEEPEST_EDGE_LOGGING = false; -const bool GlobalConfiguration::GAUSSIAN_ELIMINATION_LOGGING = false; -const bool GlobalConfiguration::QUERY_LOADER_LOGGING = false; -const bool GlobalConfiguration::SYMBOLIC_BOUND_TIGHTENER_LOGGING = false; -const bool GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING = false; -const bool GlobalConfiguration::MPS_PARSER_LOGGING = false; -const bool GlobalConfiguration::ONNX_PARSER_LOGGING = false; -const bool GlobalConfiguration::SOI_LOGGING = false; -const bool GlobalConfiguration::SCORE_TRACKER_LOGGING = false; -const bool GlobalConfiguration::CEGAR_LOGGING = false; - -const bool GlobalConfiguration::USE_SMART_FIX = false; -const bool GlobalConfiguration::USE_LEAST_FIX = false; - -void GlobalConfiguration::print() -{ - printf( "****************************\n" ); - printf( "*** Global Configuraiton ***\n" ); - printf( "****************************\n" ); - printf( " DEFAULT_EPSILON_FOR_COMPARISONS: %.15lf\n", DEFAULT_EPSILON_FOR_COMPARISONS ); - printf( " DEFAULT_DOUBLE_TO_STRING_PRECISION: %u\n", DEFAULT_DOUBLE_TO_STRING_PRECISION ); - printf( " STATISTICS_PRINTING_FREQUENCY: %u\n", STATISTICS_PRINTING_FREQUENCY ); - printf( " BOUND_COMPARISON_ADDITIVE_TOLERANCE: %.15lf\n", - BOUND_COMPARISON_ADDITIVE_TOLERANCE ); - printf( " BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE: %.15lf\n", - BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE ); - printf( " PIVOT_CHANGE_COLUMN_TOLERANCE: %.15lf\n", PIVOT_CHANGE_COLUMN_TOLERANCE ); - printf( " RATIO_CONSTRAINT_ADDITIVE_TOLERANCE: %.15lf\n", - RATIO_CONSTRAINT_ADDITIVE_TOLERANCE ); - printf( " RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE: %.15lf\n", - RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE ); - printf( " 
BASIC_COSTS_ADDITIVE_TOLERANCE: %.15lf\n", BASIC_COSTS_ADDITIVE_TOLERANCE ); - printf( " BASIC_COSTS_MULTIPLICATIVE_TOLERANCE: %.15lf\n", - BASIC_COSTS_MULTIPLICATIVE_TOLERANCE ); - printf( " DEGRADATION_CHECKING_FREQUENCY: %u\n", DEGRADATION_CHECKING_FREQUENCY ); - printf( " DEGRADATION_THRESHOLD: %.15lf\n", DEGRADATION_THRESHOLD ); - printf( " ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD: %.15lf\n", ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD ); - printf( " USE_COLUMN_MERGING_EQUATIONS: %s\n", USE_COLUMN_MERGING_EQUATIONS ? "Yes" : "No" ); - printf( " GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD: %.15lf\n", - GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD ); - printf( " MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS: %u\n", MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS ); - printf( " BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY: %u\n", - BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY ); - printf( " COST_FUNCTION_ERROR_THRESHOLD: %.15lf\n", COST_FUNCTION_ERROR_THRESHOLD ); - printf( " USE_HARRIS_RATIO_TEST: %s\n", USE_HARRIS_RATIO_TEST ? "Yes" : "No" ); - - printf( " PREPROCESS_INPUT_QUERY: %s\n", PREPROCESS_INPUT_QUERY ? "Yes" : "No" ); - printf( " PREPROCESSOR_ELIMINATE_VARIABLES: %s\n", - PREPROCESSOR_ELIMINATE_VARIABLES ? 
"Yes" : "No" ); - printf( " PSE_ITERATIONS_BEFORE_RESET: %u\n", PSE_ITERATIONS_BEFORE_RESET ); - printf( " PSE_GAMMA_ERROR_THRESHOLD: %.15lf\n", PSE_GAMMA_ERROR_THRESHOLD ); - printf( " CONSTRAINT_COMPARISON_TOLERANCE: %.15lf\n", CONSTRAINT_COMPARISON_TOLERANCE ); - - String basisBoundTighteningType; - switch ( EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE ) - { - case COMPUTE_INVERTED_BASIS_MATRIX: - basisBoundTighteningType = "Compute inverted basis matrix"; - break; - - case USE_IMPLICIT_INVERTED_BASIS_MATRIX: - basisBoundTighteningType = "Use implicit inverted basis matrix"; - break; - - default: - basisBoundTighteningType = "Unknown"; - break; - } - - printf( " EXPLICIT_BASIS_BOUND_TIGHTENING_INVERT_BASIS: %s\n", - basisBoundTighteningType.ascii() ); - printf( " EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION: %s\n", - EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION ? "Yes" : "No" ); - printf( " REFACTORIZATION_THRESHOLD: %u\n", REFACTORIZATION_THRESHOLD ); - - String basisFactorizationType; - if ( GlobalConfiguration::BASIS_FACTORIZATION_TYPE == GlobalConfiguration::LU_FACTORIZATION ) - basisFactorizationType = "LU_FACTORIZATION"; - else if ( GlobalConfiguration::BASIS_FACTORIZATION_TYPE == - GlobalConfiguration::SPARSE_LU_FACTORIZATION ) - basisFactorizationType = "SPARSE_LU_FACTORIZATION"; - else if ( GlobalConfiguration::BASIS_FACTORIZATION_TYPE == - GlobalConfiguration::FORREST_TOMLIN_FACTORIZATION ) - basisFactorizationType = "FORREST_TOMLIN_FACTORIZATION"; - else - basisFactorizationType = "Unknown"; - - printf( " BASIS_FACTORIZATION_TYPE: %s\n", basisFactorizationType.ascii() ); - printf( "****************************\n" ); -} - -// -// Local Variables: -// compile-command: "make -C ../.. 
" -// tags-file-name: "../../TAGS" -// c-basic-offset: 4 -// End: -// From 304efac01a1c8676dc79a2faad30b00ce9d9e4b7 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 9 Jan 2025 20:53:22 +0200 Subject: [PATCH 39/72] Delete src/nlr/GlobalConfiguration.h --- src/nlr/GlobalConfiguration.h | 318 ---------------------------------- 1 file changed, 318 deletions(-) delete mode 100644 src/nlr/GlobalConfiguration.h diff --git a/src/nlr/GlobalConfiguration.h b/src/nlr/GlobalConfiguration.h deleted file mode 100644 index 92dcfdd049..0000000000 --- a/src/nlr/GlobalConfiguration.h +++ /dev/null @@ -1,318 +0,0 @@ -/********************* */ -/*! \file GlobalConfiguration.h - ** \verbatim - ** Top contributors (to current version): - ** Guy Katz, Parth Shah, Derek Huang - ** This file is part of the Marabou project. - ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS - ** in the top-level source directory) and their institutional affiliations. - ** All rights reserved. See the file COPYING in the top-level source - ** directory for licensing information.\endverbatim - ** - ** [[ Add lengthier description here ]] - - **/ - -#ifndef __GlobalConfiguration_h__ -#define __GlobalConfiguration_h__ - -#include "DivideStrategy.h" - -class GlobalConfiguration -{ -public: - static void print(); - - // The exponential moving average is calculated as - // ema = current * alpha + previous * (1 - alpha) - static const double EXPONENTIAL_MOVING_AVERAGE_ALPHA; - - // Whether to use SoI instead of Reluplex for local search for satisfying assignments - // to non-linear constraint. - static bool USE_DEEPSOI_LOCAL_SEARCH; - - // The quantity by which the score is bumped up for PLContraints not - // participating in the SoI. This promotes those constraints in the branching - // order. - static const double SCORE_BUMP_FOR_PL_CONSTRAINTS_NOT_IN_SOI; - - // Use the polarity metrics to decide which branch to take first in a case split - // and how to repair a ReLU constraint. 
- static const bool USE_POLARITY_BASED_DIRECTION_HEURISTICS; - - // The default epsilon used for comparing doubles - static const double DEFAULT_EPSILON_FOR_COMPARISONS; - - // The precision level when convering doubles to strings - static const unsigned DEFAULT_DOUBLE_TO_STRING_PRECISION; - - // How often should the main loop print statistics? - static const unsigned STATISTICS_PRINTING_FREQUENCY; - static const unsigned STATISTICS_PRINTING_FREQUENCY_GUROBI; - - // Tolerance when checking whether the value computed for a basic variable is out of bounds - static const double BOUND_COMPARISON_ADDITIVE_TOLERANCE; - static const double BOUND_COMPARISON_MULTIPLICATIVE_TOLERANCE; - - // Tolerance when checking whether a basic variable depends on a non-basic variable, by looking - // at the change column, as part of a pivot operation. - static const double PIVOT_CHANGE_COLUMN_TOLERANCE; - - // Tolerance for the difference when computing the pivot entry by column and by row - static const double PIVOT_ROW_AND_COLUMN_TOLERANCE; - - // Tolerance when checking whether a non-basic variable is eligible for being selected as the - // entering variable, by its reduced cost - static const double ENTRY_ELIGIBILITY_TOLERANCE; - - // Ratio test tolerance constants - static const double RATIO_CONSTRAINT_ADDITIVE_TOLERANCE; - static const double RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE; - static const double HARRIS_RATIO_CONSTRAINT_ADDITIVE_TOLERANCE; - static const double HARRIS_RATIO_CONSTRAINT_MULTIPLICATIVE_TOLERANCE; - - // Cost function tolerance constants - static const double BASIC_COSTS_ADDITIVE_TOLERANCE; - static const double BASIC_COSTS_MULTIPLICATIVE_TOLERANCE; - - // Sparse ForrestTomlin diagonal element tolerance constant - static const double SPARSE_FORREST_TOMLIN_DIAGONAL_ELEMENT_TOLERANCE; - - // Toggle use of Harris' two-pass ratio test for selecting the leaving variable - static const bool USE_HARRIS_RATIO_TEST; - - // Toggle query-preprocessing on/off. 
- static const bool PREPROCESS_INPUT_QUERY; - - // Assuming the preprocessor is on, toggle whether or not it will attempt to perform variable - // elimination. - static const bool PREPROCESSOR_ELIMINATE_VARIABLES; - - // Toggle whether or not PL/NL constraints will be called upon - // to add auxiliary variables and equations after preprocessing. - static const bool PL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING; - static const bool NL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING; - - // If the difference between a variable's lower and upper bounds is smaller than this - // threshold, the preprocessor will treat it as fixed. - static const double PREPROCESSOR_ALMOST_FIXED_THRESHOLD; - - // Maximal rounds of tightening to perform in the preprocessor to avoid non-termination. - static const unsigned PREPROCESSSING_MAX_TIGHTEING_ROUND; - - // Try to set the initial tableau assignment to an assignment that is legal with - // respect to the input network. - static const bool WARM_START; - - // The maximal number of iterations without new tree states being visited, before - // the engine performs a precision restoration. - static const unsigned MAX_ITERATIONS_WITHOUT_PROGRESS; - - // How often should the main loop check the current degradation? - static const unsigned DEGRADATION_CHECKING_FREQUENCY; - - // The threshold of degradation above which restoration is required - static const double DEGRADATION_THRESHOLD; - - // If a pivot element in a simplex iteration is smaller than this threshold, the engine will - // attempt to pick another element. - static const double ACCEPTABLE_SIMPLEX_PIVOT_THRESHOLD; - - // If true, column-merging equations are given special treatment and cause columns in the - // tableau to be merged (instead of a new row added). 
- static const bool USE_COLUMN_MERGING_EQUATIONS; - - // If a pivot element in a Gaussian elimination iteration is smaller than this threshold times - // the largest element in the column, the elimination engine will attempt to pick another pivot. - static const double GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD; - - // How many potential pivots should the engine inspect (at most) in every simplex iteration? - static const unsigned MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS; - - static const DivideStrategy SPLITTING_HEURISTICS; - - // The frequency to use interval splitting when largest interval splitting strategy is in use. - static const unsigned INTERVAL_SPLITTING_FREQUENCY; - - // When automatically deciding which splitting strategy to use, we use relu-splitting if - // the number of inputs is larger than this number. - static const unsigned INTERVAL_SPLITTING_THRESHOLD; - - // How often should we perform full bound tightening, on the entire contraints matrix A. - static const unsigned BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY; - - // When the row bound tightener is asked to run until saturation, it can enter an infinite loop - // due to tiny increments in bounds. This number limits the number of iterations it can perform. - static const unsigned ROW_BOUND_TIGHTENER_SATURATION_ITERATIONS; - - // If the cost function error exceeds this threshold, it is recomputed - static const double COST_FUNCTION_ERROR_THRESHOLD; - - // Random seed for generating simulation values. - static const unsigned SIMULATION_RANDOM_SEED; - - // Random seed for EstimateVolume procedure (PreimageApproximation). - static const unsigned VOLUME_ESTIMATION_RANDOM_SEED; - - // Number of iterations for EstimateVolume procedure (PreimageApproximation). - static const unsigned VOLUME_ESTIMATION_ITERATIONS; - - // Random seed for PreimageApproximation optimization. 
- static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED; - - // Maximum iterations for PreimageApproximation optimization. - static const unsigned PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; - - // Step size for PreimageApproximation optimization. - static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; - - // Learning rate for PreimageApproximation optimization. - static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; - - // How often should projected steepest edge reset the reference space? - static const unsigned PSE_ITERATIONS_BEFORE_RESET; - - // An error threshold which, when crossed, causes projected steepest edge to reset the reference - // space - static const double PSE_GAMMA_ERROR_THRESHOLD; - - // PSE's Gamma function's update tolerance - static const double PSE_GAMMA_UPDATE_TOLERANCE; - - // The tolerance for checking whether f = Constraint( b ), Constraint \in { ReLU, ABS, Sign} - static const double CONSTRAINT_COMPARISON_TOLERANCE; - - // Toggle between two types of LSE lower bound for softmax - static const double SOFTMAX_LSE2_THRESHOLD; - - // Should the initial basis be comprised only of auxiliary (row) variables? - static const bool ONLY_AUX_INITIAL_BASIS; - - /* - Explicit (Reluplex-style) bound tightening options - */ - - enum ExplicitBasisBoundTighteningType { - // Compute the inverse basis matrix and use it - COMPUTE_INVERTED_BASIS_MATRIX = 0, - // Use the inverted basis matrix without using it, via transformations - USE_IMPLICIT_INVERTED_BASIS_MATRIX = 1, - // Disable explicit basis bound tightening - DISABLE_EXPLICIT_BASIS_TIGHTENING = 2, - }; - - // When doing bound tightening using the explicit basis matrix, should the basis matrix be - // inverted? - static const ExplicitBasisBoundTighteningType EXPLICIT_BASIS_BOUND_TIGHTENING_TYPE; - static const double EXPLICIT_BASIS_BOUND_TIGHTENING_ROUNDING_CONSTANT; - - // When doing explicit bound tightening, should we repeat until saturation? 
- static const bool EXPLICIT_BOUND_TIGHTENING_UNTIL_SATURATION; - - /* - Symbolic bound tightening options - */ - - // Symbolic tightening, LP rounding constants - static const double SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT; - static const double LP_TIGHTENING_ROUNDING_CONSTANT; - - static const double SIGMOID_CUTOFF_CONSTANT; - - /* - Constraint fixing heuristics - */ - - // When a PL constraint proposes a fix that affects multiple variables, should it first query - // for any relevant linear connections between the variables? - static const bool USE_SMART_FIX; - - // A heuristic for selecting which of the broken PL constraints will be repaired next. In this - // case, the one that has been repaired the least number of times so far. - static const bool USE_LEAST_FIX; - - /* - Basis factorization options - */ - - // The number of accumualted eta matrices, after which the basis will be refactorized - static const unsigned REFACTORIZATION_THRESHOLD; - - // The kind of basis factorization algorithm in use - enum BasisFactorizationType { - LU_FACTORIZATION, - SPARSE_LU_FACTORIZATION, - FORREST_TOMLIN_FACTORIZATION, - SPARSE_FORREST_TOMLIN_FACTORIZATION, - }; - static const BasisFactorizationType BASIS_FACTORIZATION_TYPE; - - /* In the polarity-based branching heuristics, only this many earliest nodes - are considered to branch on. - */ - static const unsigned POLARITY_CANDIDATES_THRESHOLD; - - /* The max number of DnC splits - */ - static const unsigned DNC_DEPTH_THRESHOLD; - - /* Minimal coefficient of a variable in a Tableau row, that is used for bound tightening - */ - static const double MINIMAL_COEFFICIENT_FOR_TIGHTENING; - - /* The tolerance of errors when checking lemmas in the proof-checking process - */ - static const double LEMMA_CERTIFICATION_TOLERANCE; - - /* Denote whether proofs should be written as a JSON file - */ - static const bool WRITE_JSON_PROOF; - - /* How many layers after the current layer do we encode in backward analysis. 
- */ - static const unsigned BACKWARD_BOUND_PROPAGATION_DEPTH; - - /* How many rounds of backward analysis to perform? - */ - static const unsigned MAX_ROUNDS_OF_BACKWARD_ANALYSIS; - -#ifdef ENABLE_GUROBI - /* - The number of threads Gurobi spawns - */ - static const unsigned GUROBI_NUMBER_OF_THREADS; - static const bool GUROBI_LOGGING; -#endif // ENABLE_GUROBI - - /* - Logging options - */ - static const bool DNC_MANAGER_LOGGING; - static const bool ENGINE_LOGGING; - static const bool TABLEAU_LOGGING; - static const bool SMT_CORE_LOGGING; - static const bool DANTZIGS_RULE_LOGGING; - static const bool BASIS_FACTORIZATION_LOGGING; - static const bool PREPROCESSOR_LOGGING; - static const bool INPUT_QUERY_LOGGING; - static const bool PROJECTED_STEEPEST_EDGE_LOGGING; - static const bool GAUSSIAN_ELIMINATION_LOGGING; - static const bool QUERY_LOADER_LOGGING; - static const bool SYMBOLIC_BOUND_TIGHTENER_LOGGING; - static const bool NETWORK_LEVEL_REASONER_LOGGING; - static const bool MPS_PARSER_LOGGING; - static const bool ONNX_PARSER_LOGGING; - static const bool SOI_LOGGING; - static const bool SCORE_TRACKER_LOGGING; - static const bool CEGAR_LOGGING; -}; - -#endif // __GlobalConfiguration_h__ - -// -// Local Variables: -// compile-command: "make -C .. 
" -// tags-file-name: "../TAGS" -// c-basic-offset: 4 -// End: -// From 35cbdf5490c2d2d1b55ffee522307f7dd76b4b1e Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 9 Jan 2025 20:53:53 +0200 Subject: [PATCH 40/72] Add files via upload --- src/nlr/AdamOptimizer.h | 51 ++++++- src/nlr/CoordinateDescent.h | 12 ++ src/nlr/GradientDescent.h | 52 ++++++- src/nlr/LPFormulator.cpp | 233 +++++++++++++++++++++++------ src/nlr/LPFormulator.h | 12 +- src/nlr/Layer.cpp | 289 +++++++++++++++++++++++++++++++----- src/nlr/Layer.h | 2 + 7 files changed, 561 insertions(+), 90 deletions(-) diff --git a/src/nlr/AdamOptimizer.h b/src/nlr/AdamOptimizer.h index 7d97046329..018cd34325 100644 --- a/src/nlr/AdamOptimizer.h +++ b/src/nlr/AdamOptimizer.h @@ -12,6 +12,7 @@ #include // for std::sort #include +#include #include #include #include @@ -200,11 +201,10 @@ template struct adam_optimizer_parameters Real step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; Real lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; - Real lr = 0.05; Real epsilon = GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS; Real beta1 = 0.9; Real beta2 = 0.999; - Real weight_decay = 0.2; + Real weight_decay = 0; bool maximize = false; size_t max_iterations = 100; ArgumentContainer const *initial_guess = nullptr; @@ -295,6 +295,8 @@ ArgumentContainer adam_optimizer( auto epsilon = cd_params.epsilon; auto beta1 = cd_params.beta1; auto beta2 = cd_params.beta2; + auto lower_bounds = cd_params.lower_bounds; + auto upper_bounds = cd_params.upper_bounds; auto lr = cd_params.lr; auto weight_decay = cd_params.weight_decay; auto guess = @@ -319,14 +321,31 @@ ArgumentContainer adam_optimizer( for ( size_t i = 0; i < max_iterations; ++i ) { + double current_cost = cost_function( guess[0] ); + std::cout << " current_cost = " << current_cost << std::endl; for ( size_t j = 0; j < dimension; ++j ) { candidates[j] = guess[0]; candidates[j][j] += step_size; + if ( 
candidates[j][j] > upper_bounds[j] || candidates[j][j] < lower_bounds[j] ) + { + gradient[j] = 0; + continue; + } + size_t sign = ( maximize == false ? 1 : -1 ); double cost = cost_function( candidates[j] ); - gradient[j] = sign * cost / step_size + weight_decay * guess[0][j]; + + for ( size_t k = 0; k < dimension; ++k ) + { + std::cout << "candidates[" << j << "][" << k << "] = " << candidates[j][k] + << std::endl; + std::cout << "guess[0][" << k << "] = " << guess[0][k] << std::endl; + } + + std::cout << " cost = " << cost << std::endl; + gradient[j] = sign * ( cost - current_cost ) / step_size + weight_decay * guess[0][j]; if ( !FloatUtils::isNan( target_value ) && cost <= target_value ) { @@ -335,6 +354,21 @@ ArgumentContainer adam_optimizer( } } + bool gradient_is_zero = true; + for ( size_t j = 0; j < dimension; ++j ) + { + std::cout << "gradient[" << j << "] = " << gradient[j] << std::endl; + if ( FloatUtils::abs( gradient[j] ) > epsilon ) + { + gradient_is_zero = false; + } + } + + if ( gradient_is_zero ) + { + break; + } + for ( size_t j = 0; j < dimension; ++j ) { first_moment[j] = beta1 * first_moment[j] + ( 1 - beta1 ) * gradient[j]; @@ -344,6 +378,17 @@ ArgumentContainer adam_optimizer( second_moment[j] /= ( 1 - std::pow( beta2, step_size ) ); guess[0][j] -= lr * first_moment[j] / ( std::sqrt( second_moment[j] ) + epsilon ); + + + if ( guess[0][j] > upper_bounds[j] ) + { + guess[0][j] = upper_bounds[j]; + } + + if ( guess[0][j] < lower_bounds[j] ) + { + guess[0][j] = lower_bounds[j]; + } } } diff --git a/src/nlr/CoordinateDescent.h b/src/nlr/CoordinateDescent.h index 83f01ed197..f73d909c7f 100644 --- a/src/nlr/CoordinateDescent.h +++ b/src/nlr/CoordinateDescent.h @@ -251,6 +251,8 @@ ArgumentContainer coordinate_descent( const size_t candidates_number = 2 * dimension; auto step_size = cd_params.step_size; auto max_iterations = cd_params.max_iterations; + auto lower_bounds = cd_params.lower_bounds; + auto upper_bounds = cd_params.upper_bounds; auto guess 
= random_initial_population( cd_params.lower_bounds, cd_params.upper_bounds, 1, gen ); @@ -272,6 +274,16 @@ ArgumentContainer coordinate_descent( size_t sign = ( j % 2 == 0 ? 1 : -1 ); candidates[j][index] += sign * step_size; + if ( candidates[j][index] > upper_bounds[j] ) + { + candidates[j][index] = upper_bounds[j]; + } + + if ( candidates[j][index] < lower_bounds[j] ) + { + candidates[j][index] = lower_bounds[j]; + } + costs[j] = cost_function( candidates[j] ); if ( current_minimum_cost && costs[j] < *current_minimum_cost ) diff --git a/src/nlr/GradientDescent.h b/src/nlr/GradientDescent.h index e86aca17d9..f582711397 100644 --- a/src/nlr/GradientDescent.h +++ b/src/nlr/GradientDescent.h @@ -12,6 +12,7 @@ #include // for std::sort #include +#include #include #include #include @@ -199,13 +200,11 @@ template struct gradient_descent_parameters ArgumentContainer upper_bounds; Real step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; - // size_t max_iterations = - // GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; - size_t max_iterations = 2; + size_t max_iterations = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; + Real epsilon = GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS; bool maximize = false; Real weight_decay = 0; - // Real lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; - Real lr = 0.001; + Real lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; ArgumentContainer const *initial_guess = nullptr; }; @@ -223,6 +222,13 @@ void validate_gradient_descent_parameters( throw std::domain_error( oss.str() ); } + if ( FloatUtils::isNan( cd_params.epsilon ) || cd_params.epsilon <= 0 ) + { + oss << __FILE__ << ":" << __LINE__ << ":" << __func__; + oss << ": epsilon > 0 is required, but got epsilon=" << cd_params.epsilon << "."; + throw std::domain_error( oss.str() ); + } + if ( cd_params.max_iterations < 1 ) { oss << __FILE__ << ":" 
<< __LINE__ << ":" << __func__; @@ -270,8 +276,11 @@ ArgumentContainer gradient_descent( auto step_size = cd_params.step_size; auto max_iterations = cd_params.max_iterations; auto maximize = cd_params.maximize; + auto epsilon = cd_params.epsilon; auto lr = cd_params.lr; auto weight_decay = cd_params.weight_decay; + auto lower_bounds = cd_params.lower_bounds; + auto upper_bounds = cd_params.upper_bounds; auto guess = random_initial_population( cd_params.lower_bounds, cd_params.upper_bounds, 1, gen ); @@ -286,15 +295,21 @@ ArgumentContainer gradient_descent( for ( size_t i = 0; i < max_iterations; ++i ) { + double current_cost = cost_function( guess[0] ); for ( size_t j = 0; j < dimension; ++j ) { candidates[j] = guess[0]; candidates[j][j] += step_size; + if ( candidates[j][j] > upper_bounds[j] || candidates[j][j] < lower_bounds[j] ) + { + gradient[j] = 0; + continue; + } + size_t sign = ( maximize == false ? 1 : -1 ); double cost = cost_function( candidates[j] ); - gradient[j] = sign * cost / step_size + weight_decay * guess[0][j]; - + gradient[j] = sign * ( cost - current_cost ) / step_size + weight_decay * guess[0][j]; if ( !FloatUtils::isNan( target_value ) && cost <= target_value ) { guess[0] = candidates[j]; @@ -302,9 +317,32 @@ ArgumentContainer gradient_descent( } } + bool gradient_is_zero = true; + for ( size_t j = 0; j < dimension; ++j ) + { + if ( FloatUtils::abs( gradient[j] ) > epsilon ) + { + gradient_is_zero = false; + } + } + if ( gradient_is_zero ) + { + break; + } + for ( size_t j = 0; j < dimension; ++j ) { guess[0][j] -= lr * gradient[j]; + + if ( guess[0][j] > upper_bounds[j] ) + { + guess[0][j] = upper_bounds[j]; + } + + if ( guess[0][j] < lower_bounds[j] ) + { + guess[0][j] = lower_bounds[j]; + } } } diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp index c7f3691157..31e5c31a4e 100644 --- a/src/nlr/LPFormulator.cpp +++ b/src/nlr/LPFormulator.cpp @@ -347,7 +347,7 @@ void LPFormulator::optimizeBoundsWithPreimageApproximation( const 
Map>(); + auto opt_params = gradient_descent_parameters>(); unsigned depth = layers.size(); opt_params.lower_bounds.resize( depth, 0 ); opt_params.upper_bounds.resize( depth, 1 ); @@ -356,10 +356,16 @@ void LPFormulator::optimizeBoundsWithPreimageApproximation( const MapcomputeParameterisedSymbolicBounds( optimal_coeffs[i], true ); + { + std::cout << "optimal_coeffs[" << i << "] = " << optimal_coeffs[i] << std::endl; + if ( i == layers.size() - 1 ) + layers[i]->computeParameterisedSymbolicBounds( optimal_coeffs[i], true ); + else + layers[i]->computeParameterisedSymbolicBounds( optimal_coeffs[i] ); + } optimizeBoundsWithLpRelaxation( layers, true, optimal_coeffs ); } @@ -372,50 +378,57 @@ double LPFormulator::EstimateVolume( const Map &layers, layers[i]->computeParameterisedSymbolicBounds( coeffs[i] ); std::mt19937_64 rng( GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED ); - double average = 0; + double log_box_volume = 0; + double sigmoid_sum = 0; + + unsigned inputLayerIndex = 0; + unsigned outputLayerIndex = layers.size() - 1; + const Layer *inputLayer = layers[inputLayerIndex]; + const Layer *outputLayer = layers[outputLayerIndex]; + + // Calculate volume of input variables' bounding box. + for ( unsigned index = 0; index < inputLayer->getSize(); ++index ) + { + if ( inputLayer->neuronEliminated( index ) ) + continue; + + double lb = inputLayer->getLb( index ); + double ub = inputLayer->getUb( index ); + + log_box_volume += std::log( ub - lb ); + } + for ( unsigned i = 0; i < GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; ++i ) { - // Sample point from known bounds. + // Sample input point from known bounds. 
Map point; - for ( auto pair : layers ) + for ( unsigned index = 0; index < inputLayer->getSize(); ++index ) { - unsigned layerIndex = pair.first; - const Layer *layer = pair.second; - for ( unsigned index = 0; index < layer->getSize(); ++index ) - { - if ( layer->neuronEliminated( index ) ) - continue; + if ( inputLayer->neuronEliminated( index ) ) + continue; - const NeuronIndex neuron( layerIndex, index ); - double lb = layer->getLb( index ); - double ub = layer->getUb( index ); - std::uniform_real_distribution<> dis( lb, ub ); - point.insert( neuron, dis( rng ) ); - } + NeuronIndex neuron( inputLayerIndex, index ); + double lb = inputLayer->getLb( index ); + double ub = inputLayer->getUb( index ); + std::uniform_real_distribution<> dis( lb, ub ); + point.insert( neuron, dis( rng ) ); } - // Compute the average sigmoid of log-sum-exp of points' difference from bounding - // hyperplanes. - double sum_exp = 0; - for ( auto pair : layers ) + // Calculate sigmoid of maximum margin from output symbolic bounds. 
+ double max_margin = 0; + for ( unsigned index = 0; index < outputLayer->getSize(); ++index ) { - const Layer *layer = pair.second; - for ( unsigned index = 0; index < layer->getSize(); ++index ) - { - if ( layer->neuronEliminated( index ) ) - continue; + if ( outputLayer->neuronEliminated( index ) ) + continue; - double margin = layer->calculateDifferenceFromSymbolic( point, index ); - if ( FloatUtils::wellFormed( std::exp( margin ) ) ) - sum_exp += std::exp( margin ); - } + double margin = outputLayer->calculateDifferenceFromSymbolic( point, index ); + max_margin = std::max( max_margin, margin ); } - - double slse = 1 / ( 1 + ( 1 / sum_exp ) ); - average += slse / GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; + sigmoid_sum += SigmoidConstraint::sigmoid( max_margin ); } - return average; + return std::exp( log_box_volume + std::log( sigmoid_sum ) ) / + GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; } void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map &layers, @@ -1808,6 +1821,10 @@ void LPFormulator::addLayerToParameterisedModel( GurobiWrapper &gurobi, addSignLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); break; + case Layer::BILINEAR: + addBilinearLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + break; + default: addLayerToModel( gurobi, layer, createVariables ); break; @@ -1872,9 +1889,10 @@ void LPFormulator::addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurob /* The phase of this ReLU is not yet fixed. - For y = ReLU(x), we add the following triangular relaxation: + For y = ReLU(x), we add the following relaxation: 1. y >= 0 + 2. y >= x 2. y >= coeff * x 3. y is below the line the crosses (x.lb,0) and (x.ub,x.ub) */ @@ -1884,7 +1902,13 @@ void LPFormulator::addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurob terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); gurobi.addGeqConstraint( terms, 0 ); - // y >= coeff * x, i.e. 
y - coeff * x >= 0 (varies continuously between y >= 0 and + // y >= x, i.e. y - x >= 0. + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + + // y >= coeff * x, i.e. y - coeff * x >= 0 (varies continuously between y >= 0 and // y >= alpha * x). terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); @@ -1969,9 +1993,9 @@ void LPFormulator::addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurob /* 2 - y <= ----- * (1 - coeff) x + 1 (varies continuously between y <= 1 and y <= -2 / l * - x + 1). + y <= ----- * (1 - coeff) x + 1 - l + Varies continuously between y <= 1 and y <= -2/l * x + 1. */ List terms; terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); @@ -1981,9 +2005,9 @@ void LPFormulator::addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurob /* 2 - y >= ----- * (1 - coeff) x - 1 (varies continuously between y >= -1 and y >= 2 / u * - x - 1). + y >= ----- * (1 - coeff) x - 1 u + Varies continuously between y >= -1 and y >= 2/u * x - 1. */ terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); @@ -2057,11 +2081,12 @@ void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper & /* The phase of this LeakyReLU is not yet fixed. - For y = LeakyReLU(x), we add the following triangular relaxation: + For y = LeakyReLU(x), we add the following relaxation: 1. y >= ((1 - alpha) * coeff + alpha) * x (varies continuously between y >= alpha * x and y >= x). 2. y >= x - 3. y is below the line the crosses (x.lb,0) and (x.ub,x.ub) + 3. y >= alpha * x + 4. 
y is below the line the crosses (x.lb,0) and (x.ub,x.ub) */ // y >= ((1 - alpha) * coeff + alpha) * x @@ -2077,6 +2102,12 @@ void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper & terms.append( GurobiWrapper::Term( -1, Stringf( "x%u", sourceVariable ) ) ); gurobi.addGeqConstraint( terms, 0 ); + // y >= alpha * x, i.e. y - alpha * x >= 0 + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( -slope, Stringf( "x%u", sourceVariable ) ) ); + gurobi.addGeqConstraint( terms, 0 ); + terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -weight, Stringf( "x%u", sourceVariable ) ) ); @@ -2086,6 +2117,122 @@ void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper & } } +void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) +{ + for ( unsigned i = 0; i < layer->getSize(); ++i ) + { + if ( !layer->neuronEliminated( i ) ) + { + unsigned targetVariable = layer->neuronToVariable( i ); + + List sources = layer->getActivationSources( i ); + + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + + Vector sourceLbs; + Vector sourceUbs; + Vector sourceValues; + Vector sourceNeurons; + bool allConstant = true; + for ( const auto &sourceIndex : sources ) + { + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( sourceNeuron ); + String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) ); + + sourceNeurons.append( sourceNeuron ); + sourceLbs.append( sourceLb ); + sourceUbs.append( sourceUb ); + + if ( createVariables && !gurobi.containsVariable( sourceName ) ) + gurobi.addVariable( sourceName, sourceLb, sourceUb ); + + if ( !sourceLayer->neuronEliminated( 
sourceNeuron ) ) + { + allConstant = false; + } + else + { + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + sourceValues.append( sourceValue ); + } + } + + if ( allConstant ) + { + // If the both source neurons have been eliminated, this neuron is constant + double targetValue = sourceValues[0] * sourceValues[1]; + gurobi.addVariable( Stringf( "x%u", targetVariable ), targetValue, targetValue ); + continue; + } + + double lb = FloatUtils::infinity(); + double ub = FloatUtils::negativeInfinity(); + List values = { sourceLbs[0] * sourceLbs[1], + sourceLbs[0] * sourceUbs[1], + sourceUbs[0] * sourceLbs[1], + sourceUbs[0] * sourceUbs[1] }; + for ( const auto &v : values ) + { + if ( v < lb ) + lb = v; + if ( v > ub ) + ub = v; + } + + gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); + + // Billinear linear relaxation (arXiv:2405.21063v2 [cs.LG]) + // Lower bound: out >= a_l * x + b_l * y + c_l, where + // a_l = alpha1 * l_y + ( 1 - alpha1 ) * u_y + // b_l = alpha1 * l_x + ( 1 - alpha1 ) * u_x + // c_l = -alpha1 * l_x * l_y - ( 1 - alpha1 ) * u_x * u_y + + // Upper bound: out <= a_u * x + b_u * y + c_u, where + // a_u = alpha2 * u_y + ( 1 - alpha2 ) * l_y + // b_u = alpha2 * l_x + ( 1 - alpha2 ) * u_x + // c_u = -alpha2 * -l_x * u_y - ( 1 - alpha2 ) * u_x * l_y + + // Here assume alpha1 = alpha2 = coeff (and b_l = b_u). 
+ List terms; + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( + -coeff * sourceLbs[1] - ( 1 - coeff ) * sourceUbs[1], + Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + terms.append( GurobiWrapper::Term( + -coeff * sourceLbs[0] - ( 1 - coeff ) * sourceUbs[0], + Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + gurobi.addGeqConstraint( terms, + -coeff * sourceLbs[0] * sourceLbs[1] - + ( 1 - coeff ) * sourceUbs[0] * sourceUbs[1] ); + + terms.clear(); + terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); + terms.append( GurobiWrapper::Term( + -coeff * sourceUbs[1] - ( 1 - coeff ) * sourceLbs[1], + Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + terms.append( GurobiWrapper::Term( + -coeff * sourceLbs[0] - ( 1 - coeff ) * sourceUbs[0], + Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + gurobi.addLeqConstraint( terms, + -coeff * sourceLbs[0] * sourceUbs[1] - + ( 1 - coeff ) * sourceUbs[0] * sourceLbs[1] ); + } + } +} + +void LPFormulator::addSigmoidLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ) +{ +} + void LPFormulator::setCutoff( double cutoff ) { _cutoffInUse = true; diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index f1a5849004..cdfca2e81d 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -16,7 +16,7 @@ #ifndef __LPFormulator_h__ #define __LPFormulator_h__ -#include "AdamOptimizer.h" +#include "GradientDescent.h" #include "GurobiWrapper.h" #include "LayerOwner.h" #include "Map.h" @@ -160,6 +160,16 @@ class LPFormulator : public ParallelSolver bool createVariables, double coeff ); + void addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ); + + void 
addSigmoidLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, + const Layer *layer, + bool createVariables, + double coeff ); + // Estimate Volume of parameterised LP relaxations. static double EstimateVolume( const Map &layers, diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index b53777488e..cfb9749014 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -3394,19 +3394,17 @@ void Layer::computeSymbolicBoundsForWeightedSum() double Layer::calculateDifferenceFromSymbolic( Map &point, unsigned i ) const { - double lower_sum = _symbolicLowerBias[i]; - double upper_sum = _symbolicUpperBias[i]; + double lowerSum = _symbolicLowerBias[i]; + double upperSum = _symbolicUpperBias[i]; for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - const NeuronIndex neuron( 0, j ); - lower_sum += _symbolicLb[j * _size + i] * point[neuron]; - upper_sum += _symbolicUb[j * _size + i] * point[neuron]; + NeuronIndex neuron( 0, j ); + lowerSum += _symbolicLb[j * _size + i] * point[neuron]; + upperSum += _symbolicUb[j * _size + i] * point[neuron]; } - const NeuronIndex currentNeuron( _layerIndex, i ); - return FloatUtils::max( point[currentNeuron] - upper_sum, 0 ) + - FloatUtils::max( lower_sum - point[currentNeuron], 0 ); + return std::max( _ub[i] - upperSum, lowerSum - _lb[i] ); } void Layer::computeParameterisedSymbolicBounds( double coeff, bool receive ) @@ -3425,6 +3423,10 @@ void Layer::computeParameterisedSymbolicBounds( double coeff, bool receive ) computeParameterisedSymbolicBoundsForLeakyRelu( coeff ); break; + case BILINEAR: + computeParameterisedSymbolicBoundsForBilinear( coeff ); + break; + default: computeSymbolicBounds(); break; @@ -3940,28 +3942,19 @@ void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ) // use the heuristic described in section 4.1 of // https://files.sri.inf.ethz.ch/website/papers/DeepPoly.pdf // to set the value of lambda (either 0 or 1 is considered). 
- if ( sourceUb > sourceLb ) - { - // lambda = 1 - // Symbolic lower bound: x_f >= x_b - // Concrete lower bound: x_f >= sourceLb - // Lower bounds are passed as is - } - else - { - // lambda = ((1 - alpha) * coeff + alpha) (varies continuously between lambda = - // alpha and lambda = 1). Symbolic lower bound: x_f >= ((1 - coeff) * weight + - // alpha) x_b Concrete lower bound: x_f >= 0 + // lambda = ((1 - alpha) * coeff + alpha) (varies continuously between lambda = + // alpha and lambda = 1). Symbolic lower bound: x_f >= ((1 - alpha) * coeff + + // alpha) x_b Concrete lower bound: x_f >= 0 - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicLb[j * _size + i] *= ( 1 - _alpha ) * coeff + _alpha; - } - - _symbolicLowerBias[i] *= _alpha; + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicLb[j * _size + i] *= ( 1 - _alpha ) * coeff + _alpha; } + + _symbolicLowerBias[i] *= ( 1 - _alpha ) * coeff + _alpha; } + else { for ( unsigned j = 0; j < _inputLayerSize; ++j ) @@ -3973,19 +3966,16 @@ void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ) _symbolicLowerBias[i] *= weight; _symbolicLowerBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; - if ( sourceUb > sourceLb ) + // lambda = ((1 - alpha) * coeff + alpha) (varies continuously between lambda = + // alpha and lambda = 1). 
Symbolic lower bound: x_f >= ((1 - alpha) * coeff + + // alpha) x_b Concrete lower bound: x_f >= 0 + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - // Upper bounds are passed as is + _symbolicUb[j * _size + i] *= ( 1 - _alpha ) * coeff + _alpha; } - else - { - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _symbolicUb[j * _size + i] *= _alpha; - } - _symbolicUpperBias[i] *= _alpha; - } + _symbolicUpperBias[i] *= ( 1 - _alpha ) * coeff + _alpha; } /* @@ -4083,6 +4073,233 @@ void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ) } } +void Layer::computeParameterisedSymbolicBoundsForBilinear( double coeff ) +{ + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); + std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + { + _symbolicLowerBias[i] = _eliminatedNeurons[i]; + _symbolicUpperBias[i] = _eliminatedNeurons[i]; + + _symbolicLbOfLb[i] = _eliminatedNeurons[i]; + _symbolicUbOfLb[i] = _eliminatedNeurons[i]; + _symbolicLbOfUb[i] = _eliminatedNeurons[i]; + _symbolicUbOfUb[i] = _eliminatedNeurons[i]; + } + } + + for ( unsigned i = 0; i < _size; ++i ) + { + if ( _eliminatedNeurons.exists( i ) ) + continue; + + ASSERT( _neuronToActivationSources.exists( i ) ); + List sources = getActivationSources( i ); + ASSERT( sources.size() == 2 ); + + const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); + + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); + + Vector sourceLbs; + Vector sourceUbs; + Vector sourceValues; + bool allConstant = true; + unsigned indexA = 0; + unsigned indexB = 0; + unsigned counter = 0; + for ( const auto &sourceIndex : sources ) + { + unsigned sourceNeuron = sourceIndex._neuron; + double sourceLb = sourceLayer->getLb( sourceNeuron ); + double sourceUb = sourceLayer->getUb( 
sourceNeuron ); + + sourceLbs.append( sourceLb ); + sourceUbs.append( sourceUb ); + + if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) + { + allConstant = false; + } + else + { + double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); + sourceValues.append( sourceValue ); + } + + if ( counter == 0 ) + { + indexA = sourceIndex._neuron; + } + else + { + indexB = sourceIndex._neuron; + } + ++counter; + } + + if ( allConstant ) + { + // If the both source neurons have been eliminated, this neuron is constant + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] = 0; + _symbolicLb[j * _size + i] = 0; + } + + _symbolicUpperBias[i] = sourceValues[0] * sourceValues[1]; + _symbolicLowerBias[i] = sourceValues[0] * sourceValues[1]; + continue; + } + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _symbolicUb[j * _size + i] = 0; + _symbolicLb[j * _size + i] = 0; + } + + // Billinear linear relaxation (arXiv:2405.21063v2 [cs.LG]) + // Lower bound: out >= a_l * x + b_l * y + c_l, where + // a_l = alpha1 * l_y + ( 1 - alpha1 ) * u_y + // b_l = alpha1 * l_x + ( 1 - alpha1 ) * u_x + // c_l = -alpha1 * l_x * l_y - ( 1 - alpha1 ) * u_x * u_y + + // Upper bound: out <= a_u * x + b_u * y + c_u, where + // a_u = alpha2 * u_y + ( 1 - alpha2 ) * l_y + // b_u = alpha2 * l_x + ( 1 - alpha2 ) * u_x + // c_u = -alpha2 * -l_x * u_y - ( 1 - alpha2 ) * u_x * l_y + + // Here assume alpha1 = alpha2 = coeff (and b_l = b_u). 
+ + double a_l = coeff * sourceLbs[1] + ( 1 - coeff ) * sourceUbs[1]; + double a_u = coeff * sourceUbs[1] + ( 1 - coeff ) * sourceLbs[1]; + double b = coeff * sourceLbs[0] + ( 1 - coeff ) * sourceUbs[0]; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + if ( a_l >= 0 ) + { + _symbolicLb[j * _size + i] += a_l * sourceSymbolicLb[j * sourceLayerSize + indexA]; + } + else + { + _symbolicLb[j * _size + i] += a_l * sourceSymbolicUb[j * sourceLayerSize + indexA]; + } + + if ( a_u >= 0 ) + { + _symbolicUb[j * _size + i] += a_u * sourceSymbolicUb[j * sourceLayerSize + indexA]; + } + else + { + _symbolicLb[j * _size + i] += a_u * sourceSymbolicLb[j * sourceLayerSize + indexA]; + } + + if ( b >= 0 ) + { + _symbolicLb[j * _size + i] += b * sourceSymbolicLb[j * sourceLayerSize + indexB]; + _symbolicUb[j * _size + i] += b * sourceSymbolicUb[j * sourceLayerSize + indexB]; + } + else + { + _symbolicLb[j * _size + i] += b * sourceSymbolicUb[j * sourceLayerSize + indexB]; + _symbolicUb[j * _size + i] += b * sourceSymbolicLb[j * sourceLayerSize + indexB]; + } + } + + _symbolicLowerBias[i] = + -coeff * sourceLbs[0] * sourceLbs[1] - ( 1 - coeff ) * sourceUbs[0] * sourceUbs[1]; + _symbolicUpperBias[i] = + -coeff * sourceLbs[0] * sourceUbs[1] - ( 1 - coeff ) * sourceUbs[0] * sourceLbs[1]; + + double lb = FloatUtils::infinity(); + double ub = FloatUtils::negativeInfinity(); + List values = { sourceLbs[0] * sourceLbs[1], + sourceLbs[0] * sourceUbs[1], + sourceUbs[0] * sourceLbs[1], + sourceUbs[0] * sourceUbs[1] }; + for ( const auto &v : values ) + { + if ( v < lb ) + lb = v; + if ( v > ub ) + ub = v; + } + + /* + We now have the symbolic representation for the current + layer. Next, we compute new lower and upper bounds for + it. For each of these bounds, we compute an upper bound and + a lower bound. 
+ */ + _symbolicLbOfLb[i] = _symbolicLowerBias[i]; + _symbolicUbOfLb[i] = _symbolicLowerBias[i]; + _symbolicLbOfUb[i] = _symbolicUpperBias[i]; + _symbolicUbOfUb[i] = _symbolicUpperBias[i]; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + double inputLb = _layerOwner->getLayer( 0 )->getLb( j ); + double inputUb = _layerOwner->getLayer( 0 )->getUb( j ); + + double entry = _symbolicLb[j * _size + i]; + + if ( entry >= 0 ) + { + _symbolicLbOfLb[i] += ( entry * inputLb ); + _symbolicUbOfLb[i] += ( entry * inputUb ); + } + else + { + _symbolicLbOfLb[i] += ( entry * inputUb ); + _symbolicUbOfLb[i] += ( entry * inputLb ); + } + + entry = _symbolicUb[j * _size + i]; + + if ( entry >= 0 ) + { + _symbolicLbOfUb[i] += ( entry * inputLb ); + _symbolicUbOfUb[i] += ( entry * inputUb ); + } + else + { + _symbolicLbOfUb[i] += ( entry * inputUb ); + _symbolicUbOfUb[i] += ( entry * inputLb ); + } + } + + /* + We now have the tightest bounds we can for the relu + variable. If they are tigheter than what was previously + known, store them. 
+ */ + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + } + + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } + } +} + +void Layer::computeParameterisedSymbolicBoundsForSigmoid( double coeff ) +{ +} + double Layer::softmaxLSELowerBound( const Vector &inputs, const Vector &inputLbs, const Vector &inputUbs, diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h index ee9e1aba05..c73cdb3e18 100644 --- a/src/nlr/Layer.h +++ b/src/nlr/Layer.h @@ -301,6 +301,8 @@ class Layer void computeParameterisedSymbolicBoundsForRelu( double coeff ); void computeParameterisedSymbolicBoundsForSign( double coeff ); void computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ); + void computeParameterisedSymbolicBoundsForBilinear( double coeff ); + void computeParameterisedSymbolicBoundsForSigmoid( double coeff ); /* Helper functions for interval bound tightening From 927302e5fa13c8910066e935bae9a2412034e666 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 9 Jan 2025 20:54:41 +0200 Subject: [PATCH 41/72] Add files via upload --- src/nlr/tests/Test_NetworkLevelReasoner.h | 1332 ++++++++++++++++++++- 1 file changed, 1288 insertions(+), 44 deletions(-) diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index 977faa4ed6..91bd047687 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -19,6 +19,7 @@ #include "Layer.h" #include "NetworkLevelReasoner.h" #include "Options.h" +#include "PolygonalTightening.h" #include "Query.h" #include "Tightening.h" #include "Vector.h" @@ -1936,7 +1937,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite { /* - 1 R -1 R -1 2 + 1 R -1 R -1 x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 \ / \ / \ / 1 \ / 2 
\ / 1 \ / @@ -1945,7 +1946,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -1 / \ 1 / \ -1 / \ / \ R / \ R / \ x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 + 1 -1 2 2 The example described in Fig. 2 of https://dl.acm.org/doi/10.1145/3563325 @@ -2217,7 +2218,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite { /* - 1 S -1 S -1 2 + 1 S -1 S -1 x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 \ / \ / \ / 1 \ / 2 \ / 1 \ / @@ -2226,7 +2227,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -1 / \ 1 / \ -1 / \ / \ S / \ S / \ x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 + 1 -1 2 2 The example described in Fig. 2 of https://dl.acm.org/doi/10.1145/3563325 @@ -2495,7 +2496,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite { /* - 1 Sign -1 Sign -1 2 + 1 Sign -1 Sign -1 x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 \ / \ / \ / 1 \ / 2 \ / 1 \ / @@ -2504,7 +2505,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -1 / \ 1 / \ -1 / \ / \ Sign / \ Sign / \ x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 + 1 -1 2 2 The example described in Fig. 2 of https://dl.acm.org/doi/10.1145/3563325 @@ -2773,7 +2774,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite { /* - 1 Rnd -1 Rnd -1 2 + 1 Rnd -1 Rnd -1 x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 \ / \ / \ / 1 \ / 2 \ / 1 \ / @@ -2782,7 +2783,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -1 / \ 1 / \ -1 / \ / \ Rnd / \ Rnd / \ x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 + 1 -1 2 2 The example described in Fig. 
2 of https://dl.acm.org/doi/10.1145/3563325 @@ -3051,7 +3052,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite { /* - 1 A -1 A -1 2 + 1 A -1 A -1 x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 \ / \ / \ / 1 \ / 2 \ / 1 \ / @@ -3060,7 +3061,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -1 / \ 1 / \ -1 / \ / \ A / \ A / \ x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 + 1 -1 2 2 The example described in Fig. 2 of https://dl.acm.org/doi/10.1145/3563325 @@ -3329,7 +3330,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite { /* - 1 LR -1 LR -1 2 + 1 LR -1 LR -1 x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 \ / \ / \ / 1 \ / 2 \ / 1 \ / @@ -3338,7 +3339,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -1 / \ 1 / \ -1 / \ / \ LR / \ LR / \ x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 + 1 -1 2 2 The example described in Fig. 2 of https://dl.acm.org/doi/10.1145/3563325 @@ -12164,6 +12165,383 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } + void test_preimage_approximation_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 
0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), + Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + // From parameterised SBT: + Tightening( 2, -1, Tightening::LB ), + Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), + Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), + Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.5, Tightening::LB ), + Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), + Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, 0.38, Tightening::UB ), + + // From parameterised LP: + Tightening( 8, 0, Tightening::LB ), + Tightening( 10, -2, Tightening::LB ), + Tightening( 10, 0, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 
2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), + + Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + // From parameterised SBT: + Tightening( 2, -5, Tightening::LB ), + Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), + Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 3, Tightening::UB ), + + Tightening( 7, 4, Tightening::UB ), + Tightening( 8, 2, Tightening::UB ), + + Tightening( 9, 4, Tightening::UB ), + Tightening( 10, 0.38, Tightening::UB ), + + // From parameterised LP: + Tightening( 8, 0, Tightening::LB ), + Tightening( 9, 0, Tightening::LB ), + + Tightening( 11, -0.93, Tightening::LB ), + Tightening( 10, 2, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), 
Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), + + Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), + Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), + + Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), + + Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), + + Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + 
+ Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), + Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + 
tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), + Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), + Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), + + Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), + Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), + Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), + + Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, 
Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), + Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + void test_preimage_approximation_leaky_relu() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); @@ -12173,7 +12551,685 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite NLR::NetworkLevelReasoner nlr; MockTableau tableau; nlr.setTableau( &tableau ); - populateNetworkBackwardLeakyReLU( nlr, tableau ); + populateNetworkBackwardLeakyReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + 
TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), + Tightening( 11, 0.78, Tightening::LB ), Tightening( 
11, 4.3306, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + 
Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), + Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_leaky_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyRelu2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 
); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, 
Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), + Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + 
Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), + Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_sign() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + 
"backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), 
+ Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), + Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, 
Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), + Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_sign2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( 
Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, 
Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), + Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + 
tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + 
Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), + Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, 
Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_relu_and_bilinear() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear( nlr, tableau ); tableau.setLowerBound( 0, 0 ); tableau.setUpperBound( 0, 1 ); @@ -12186,20 +13242,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ), - Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), 
Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ), - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ), } ); List bounds; @@ -12212,29 +13268,29 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - /*List expectedBounds2( - { Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + List expectedBounds2( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2.1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.35, Tightening::UB ), - 
Tightening( 11, 1.35, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), - } ); + Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), + } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); for ( auto &bound : bounds ) { bound.dump(); } - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) );*/ + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); // Change the current bounds @@ -12291,21 +13347,209 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - // Invoke backward LP propagation + // Invoke PreimageApproximation TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - /*List expectedBounds4( - { Tightening( 4, -0.5, Tightening::LB ), - Tightening( 5, -0.4, Tightening::LB ), + List expectedBounds4( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 8, -0.4571, Tightening::LB ), - Tightening( 9, -1.1057, Tightening::LB ), - } ); + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), + Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, 
expectedBounds4 ) ); + } + + void test_preimage_approximation_relu_and_bilinear2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ), + Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), + + Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( 
updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), + Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 
9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), + Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), + + Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( 
nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), + Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) );*/ + for ( auto &bound : bounds ) + { + bound.dump(); + } + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } bool boundsEqual( const List &bounds, const List &expectedBounds ) From 73ce3434e6a09640cf74c50027e37ac19d644faa Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 9 Jan 2025 20:55:25 +0200 Subject: [PATCH 42/72] Add files via upload --- src/engine/PolygonalTightening.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/engine/PolygonalTightening.h b/src/engine/PolygonalTightening.h index 0f811f6fe6..99dabafa85 100644 --- a/src/engine/PolygonalTightening.h +++ b/src/engine/PolygonalTightening.h @@ -71,7 +71,8 @@ class PolygonalTightening } allFound &= currentFound; } - bool result = allFound && _value == other._value && _type == other._type; + bool result = allFound && _value == other._value && _type == other._type && + _neuronToCoefficient.size() == other._neuronToCoefficient.size(); return result; } From 714b4e4831471361085efb84f37ba3eaa1fb4153 Mon Sep 17 00:00:00 2001 From: 
ido-shm-uel Date: Thu, 9 Jan 2025 20:56:14 +0200 Subject: [PATCH 43/72] Add files via upload --- src/configuration/GlobalConfiguration.cpp | 8 ++++---- src/configuration/GlobalConfiguration.h | 3 +++ 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp index 3a08b92234..a5d5100011 100644 --- a/src/configuration/GlobalConfiguration.cpp +++ b/src/configuration/GlobalConfiguration.cpp @@ -69,10 +69,10 @@ const double GlobalConfiguration::COST_FUNCTION_ERROR_THRESHOLD = 0.0000000001; const unsigned GlobalConfiguration::SIMULATION_RANDOM_SEED = 1; const unsigned GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED = 1; const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED = 1; -const unsigned GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS = 1000; -const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS = 100; -const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE = 0.1; -const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE = 0.05; +const unsigned GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS = 25000; +const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS = 25; +const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE = 0.025; +const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE = 0.25; const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true; diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h index 78ab9f662e..92dcfdd049 100644 --- a/src/configuration/GlobalConfiguration.h +++ b/src/configuration/GlobalConfiguration.h @@ -166,6 +166,9 @@ class GlobalConfiguration // Step size for PreimageApproximation optimization. 
static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; + // Learning rate for PreimageApproximation optimization. + static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; + // How often should projected steepest edge reset the reference space? static const unsigned PSE_ITERATIONS_BEFORE_RESET; From 061bb5705101a6eb500233368fe09d4d01869b4f Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 16 Jan 2025 20:32:08 +0200 Subject: [PATCH 44/72] Add files via upload --- src/nlr/LPFormulator.cpp | 174 +++++++++++++++++++++++-------- src/nlr/LPFormulator.h | 27 +++-- src/nlr/Layer.cpp | 137 +++++++++++++++--------- src/nlr/Layer.h | 13 +-- src/nlr/NetworkLevelReasoner.cpp | 13 ++- src/nlr/NetworkLevelReasoner.h | 2 +- 6 files changed, 249 insertions(+), 117 deletions(-) diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp index 31e5c31a4e..c9ff9eb6d6 100644 --- a/src/nlr/LPFormulator.cpp +++ b/src/nlr/LPFormulator.cpp @@ -346,11 +346,11 @@ void LPFormulator::optimizeBoundsWithPreimageApproximation( const Map>(); - unsigned depth = layers.size(); - opt_params.lower_bounds.resize( depth, 0 ); - opt_params.upper_bounds.resize( depth, 1 ); + unsigned num = getNumberOfParameters( layers ); + opt_params.lower_bounds.resize( num, 0 ); + opt_params.upper_bounds.resize( num, 1 ); double value_to_reach = 0; std::mt19937_64 rng( GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED ); @@ -358,15 +358,28 @@ void LPFormulator::optimizeBoundsWithPreimageApproximation( const Map> layerIndicesToParameters = + getParametersForLayers( layers, optimal_coeffs ); + for ( auto pair : layers ) { - std::cout << "optimal_coeffs[" << i << "] = " << optimal_coeffs[i] << std::endl; + unsigned layerIndex = pair.first; + Layer *layer = layers[layerIndex]; + std::vector currentLayerCoeffs = layerIndicesToParameters[layerIndex]; if ( i == layers.size() - 1 ) - layers[i]->computeParameterisedSymbolicBounds( optimal_coeffs[i], true ); + { + 
layer->computeParameterisedSymbolicBounds( currentLayerCoeffs, true, false ); + } else - layers[i]->computeParameterisedSymbolicBounds( optimal_coeffs[i] ); + { + layer->computeParameterisedSymbolicBounds( currentLayerCoeffs ); + } + i++; } + // Finally, run parameterised forward LP and parameterised backward LP. + optimizeBoundsWithLpRelaxation( layers, false, optimal_coeffs ); optimizeBoundsWithLpRelaxation( layers, true, optimal_coeffs ); } @@ -374,8 +387,15 @@ double LPFormulator::EstimateVolume( const Map &layers, std::vector coeffs ) { // First, run parameterised symbolic bound propagation. - for ( unsigned i = 0; i < layers.size(); ++i ) - layers[i]->computeParameterisedSymbolicBounds( coeffs[i] ); + Map> layerIndicesToParameters = + getParametersForLayers( layers, coeffs ); + for ( auto pair : layers ) + { + unsigned layerIndex = pair.first; + Layer *layer = layers[layerIndex]; + std::vector currentLayerCoeffs = layerIndicesToParameters[layerIndex]; + layer->computeParameterisedSymbolicBounds( currentLayerCoeffs ); + } std::mt19937_64 rng( GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED ); double log_box_volume = 0; @@ -383,8 +403,8 @@ double LPFormulator::EstimateVolume( const Map &layers, unsigned inputLayerIndex = 0; unsigned outputLayerIndex = layers.size() - 1; - const Layer *inputLayer = layers[inputLayerIndex]; - const Layer *outputLayer = layers[outputLayerIndex]; + Layer *inputLayer = layers[inputLayerIndex]; + Layer *outputLayer = layers[outputLayerIndex]; // Calculate volume of input variables' bounding box. 
for ( unsigned index = 0; index < inputLayer->getSize(); ++index ) @@ -756,8 +776,10 @@ void LPFormulator::createLPRelaxation( const Map &layers, addLayerToModel( gurobi, layer.second, false ); else { - double coeff = coeffs[currentLayerIndex]; - addLayerToParameterisedModel( gurobi, layer.second, false, coeff ); + Map> layerIndicesToParameters = + getParametersForLayers( layers, coeffs ); + std::vector currentLayerCoeffs = layerIndicesToParameters[currentLayerIndex]; + addLayerToParameterisedModel( gurobi, layer.second, false, currentLayerCoeffs ); } } } @@ -787,8 +809,11 @@ void LPFormulator::createLPRelaxationAfter( const Map &layers addLayerToModel( gurobi, currentLayer, true ); else { - double coeff = coeffs[currentLayerIndex]; - addLayerToParameterisedModel( gurobi, currentLayer, true, coeff ); + Map> layerIndicesToParameters = + getParametersForLayers( layers, coeffs ); + std::vector currentLayerCoeffs = + layerIndicesToParameters[currentLayerIndex]; + addLayerToParameterisedModel( gurobi, currentLayer, true, currentLayerCoeffs ); } for ( const auto &nextLayer : currentLayer->getSuccessorLayers() ) @@ -1802,27 +1827,92 @@ void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi, } } +unsigned LPFormulator::getNumberOfParameters( const Map &layers ) +{ + unsigned index = 0; + for ( auto pair : layers ) + { + unsigned layerIndex = pair.first; + Layer *layer = layers[layerIndex]; + switch ( layer->getLayerType() ) + { + case Layer::RELU: + case Layer::LEAKY_RELU: + index++; + break; + + case Layer::SIGN: + case Layer::BILINEAR: + index += 2; + break; + + default: + break; + } + } + return index; +} + +Map> +LPFormulator::getParametersForLayers( const Map &layers, + std::vector coeffs ) +{ + unsigned index = 0; + Map> layerIndicesToParameters; + for ( auto pair : layers ) + { + unsigned layerIndex = pair.first; + Layer *layer = layers[layerIndex]; + std::vector parameters = {}; + switch ( layer->getLayerType() ) + { + case Layer::RELU: + case 
Layer::LEAKY_RELU: + { + parameters = std::vector( coeffs.begin() + index, coeffs.begin() + index + 1 ); + layerIndicesToParameters.insert( layerIndex, parameters ); + index++; + } + break; + + case Layer::SIGN: + case Layer::BILINEAR: + { + parameters = std::vector( coeffs.begin() + index, coeffs.begin() + index + 2 ); + layerIndicesToParameters.insert( layerIndex, parameters ); + index += 2; + } + break; + + default: + layerIndicesToParameters.insert( layerIndex, parameters ); + break; + } + } + return layerIndicesToParameters; +} + void LPFormulator::addLayerToParameterisedModel( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - double coeff ) + std::vector coeffs ) { switch ( layer->getLayerType() ) { case Layer::RELU: - addReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + addReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeffs ); break; case Layer::LEAKY_RELU: - addLeakyReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + addLeakyReluLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeffs ); break; case Layer::SIGN: - addSignLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + addSignLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeffs ); break; case Layer::BILINEAR: - addBilinearLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeff ); + addBilinearLayerToParameterisedLpRelaxation( gurobi, layer, createVariables, coeffs ); break; default: @@ -1834,8 +1924,9 @@ void LPFormulator::addLayerToParameterisedModel( GurobiWrapper &gurobi, void LPFormulator::addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - double coeff ) + std::vector coeffs ) { + double coeff = coeffs[0]; for ( unsigned i = 0; i < layer->getSize(); ++i ) { if ( !layer->neuronEliminated( i ) ) @@ -1934,7 +2025,7 @@ void 
LPFormulator::addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurob void LPFormulator::addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - double coeff ) + std::vector coeffs ) { for ( unsigned i = 0; i < layer->getSize(); ++i ) { @@ -1993,25 +2084,25 @@ void LPFormulator::addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurob /* 2 - y <= ----- * (1 - coeff) x + 1 + y <= ----- * coeff[0] * x + 1 - l Varies continuously between y <= 1 and y <= -2/l * x + 1. */ List terms; terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( 2.0 / sourceLb * ( 1 - coeff ), + terms.append( GurobiWrapper::Term( 2.0 / sourceLb * coeffs[0], Stringf( "x%u", sourceVariable ) ) ); gurobi.addLeqConstraint( terms, 1 ); /* 2 - y >= ----- * (1 - coeff) x - 1 + y >= ----- * coeffs[1] * x - 1 u Varies continuously between y >= -1 and y >= 2/u * x - 1. */ terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); - terms.append( GurobiWrapper::Term( -2.0 / sourceUb * ( 1 - coeff ), + terms.append( GurobiWrapper::Term( -2.0 / sourceUb * coeffs[1], Stringf( "x%u", sourceVariable ) ) ); gurobi.addGeqConstraint( terms, -1 ); } @@ -2021,9 +2112,10 @@ void LPFormulator::addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurob void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - double coeff ) + std::vector coeffs ) { double slope = layer->getAlpha(); + double coeff = coeffs[0]; for ( unsigned i = 0; i < layer->getSize(); ++i ) { if ( !layer->neuronEliminated( i ) ) @@ -2120,7 +2212,7 @@ void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper & void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - double coeff ) + std::vector coeffs ) { for ( unsigned i = 0; i < 
layer->getSize(); ++i ) { @@ -2197,42 +2289,34 @@ void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &g // b_u = alpha2 * l_x + ( 1 - alpha2 ) * u_x // c_u = -alpha2 * -l_x * u_y - ( 1 - alpha2 ) * u_x * l_y - // Here assume alpha1 = alpha2 = coeff (and b_l = b_u). List terms; terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( - -coeff * sourceLbs[1] - ( 1 - coeff ) * sourceUbs[1], + -coeffs[0] * sourceLbs[1] - ( 1 - coeffs[0] ) * sourceUbs[1], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); terms.append( GurobiWrapper::Term( - -coeff * sourceLbs[0] - ( 1 - coeff ) * sourceUbs[0], + -coeffs[0] * sourceLbs[0] - ( 1 - coeffs[0] ) * sourceUbs[0], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addGeqConstraint( terms, - -coeff * sourceLbs[0] * sourceLbs[1] - - ( 1 - coeff ) * sourceUbs[0] * sourceUbs[1] ); + -coeffs[0] * sourceLbs[0] * sourceLbs[1] - + ( 1 - coeffs[0] ) * sourceUbs[0] * sourceUbs[1] ); terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( - -coeff * sourceUbs[1] - ( 1 - coeff ) * sourceLbs[1], + -coeffs[1] * sourceUbs[1] - ( 1 - coeffs[1] ) * sourceLbs[1], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); terms.append( GurobiWrapper::Term( - -coeff * sourceLbs[0] - ( 1 - coeff ) * sourceUbs[0], + -coeffs[1] * sourceLbs[0] - ( 1 - coeffs[1] ) * sourceUbs[0], Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addLeqConstraint( terms, - -coeff * sourceLbs[0] * sourceUbs[1] - - ( 1 - coeff ) * sourceUbs[0] * sourceLbs[1] ); + -coeffs[1] * sourceLbs[0] * sourceUbs[1] - + ( 1 - coeffs[1] ) * sourceUbs[0] * sourceLbs[1] ); } } } -void LPFormulator::addSigmoidLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - double coeff ) -{ 
-} - void LPFormulator::setCutoff( double cutoff ) { _cutoffInUse = true; diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index cdfca2e81d..c9164f0011 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -91,6 +91,16 @@ class LPFormulator : public ParallelSolver void addLayerToModel( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); + + // Get total number of optimizable parameters for parameterised SBT/LP relaxation. + unsigned getNumberOfParameters( const Map &layers ); + + + // Get map containing vector of optimizable parameters for parameterised SBT/LP relaxation for + // every layer index. + static Map> + getParametersForLayers( const Map &layers, std::vector coeffs ); + private: LayerOwner *_layerOwner; bool _cutoffInUse; @@ -139,36 +149,31 @@ class LPFormulator : public ParallelSolver std::vector coeffs = {} ); - // Create LP relaxations depending on an external parameter. + // Create LP relaxations depending on external parameters. void addLayerToParameterisedModel( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - double coeff ); + std::vector coeffs ); void addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - double coeff ); + std::vector coeffs ); void addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - double coeff ); + std::vector coeffs ); void addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - double coeff ); + std::vector coeffs ); void addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - double coeff ); - - void addSigmoidLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, - const Layer *layer, - bool createVariables, - double coeff ); + std::vector coeffs ); // Estimate Volume of parameterised LP relaxations. 
diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index cfb9749014..0ebb7872df 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -3122,7 +3122,7 @@ void Layer::computeSymbolicBoundsForBilinear() } else { - _symbolicLb[j * _size + i] += + _symbolicUb[j * _size + i] += sourceUbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA]; } @@ -3407,24 +3407,26 @@ double Layer::calculateDifferenceFromSymbolic( Map &point, return std::max( _ub[i] - upperSum, lowerSum - _lb[i] ); } -void Layer::computeParameterisedSymbolicBounds( double coeff, bool receive ) +void Layer::computeParameterisedSymbolicBounds( std::vector coeffs, + bool receivePolygonal, + bool receive ) { switch ( _type ) { case RELU: - computeParameterisedSymbolicBoundsForRelu( coeff ); + computeParameterisedSymbolicBoundsForRelu( coeffs, receive ); break; case SIGN: - computeParameterisedSymbolicBoundsForSign( coeff ); + computeParameterisedSymbolicBoundsForSign( coeffs, receive ); break; case LEAKY_RELU: - computeParameterisedSymbolicBoundsForLeakyRelu( coeff ); + computeParameterisedSymbolicBoundsForLeakyRelu( coeffs, receive ); break; case BILINEAR: - computeParameterisedSymbolicBoundsForBilinear( coeff ); + computeParameterisedSymbolicBoundsForBilinear( coeffs, receive ); break; default: @@ -3432,7 +3434,7 @@ void Layer::computeParameterisedSymbolicBounds( double coeff, bool receive ) break; } - if ( receive ) + if ( receivePolygonal ) { for ( unsigned i = 0; i < _size; ++i ) { @@ -3454,9 +3456,13 @@ void Layer::computeParameterisedSymbolicBounds( double coeff, bool receive ) } } -void Layer::computeParameterisedSymbolicBoundsForRelu( double coeff ) +void Layer::computeParameterisedSymbolicBoundsForRelu( std::vector coeffs, bool receive ) { + ASSERT( coeffs.size() == 1 ); + + double coeff = coeffs[0]; ASSERT( coeff >= 0 && coeff <= 1 ); + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); @@ -3613,22 +3619,29 @@ void 
Layer::computeParameterisedSymbolicBoundsForRelu( double coeff ) if ( _lb[i] < _symbolicLbOfLb[i] ) { _lb[i] = _symbolicLbOfLb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + + if ( receive ) + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } if ( _ub[i] > _symbolicUbOfUb[i] ) { _ub[i] = _symbolicUbOfUb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + + if ( receive ) + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); } } } -void Layer::computeParameterisedSymbolicBoundsForSign( double coeff ) +void Layer::computeParameterisedSymbolicBoundsForSign( std::vector coeffs, bool receive ) { - ASSERT( coeff >= 0 && coeff <= 1 ); + ASSERT( coeffs.size() == 2 ); + ASSERT( coeffs[0] >= 0 && coeffs[0] <= 1 ); + ASSERT( coeffs[1] >= 0 && coeffs[1] <= 1 ); + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); @@ -3726,9 +3739,9 @@ void Layer::computeParameterisedSymbolicBoundsForSign( double coeff ) else { // The upper bound's phase is not fixed, use parameterised - // parallelogram approximation: y >= 2 / l * ( 1 - coeff ) x + 1 + // parallelogram approximation: y <= - 2 / l * coeffs[0] * x + 1 // (varies continuously between y <= 1 and y <= -2 / l * x + 1). - double factor = -2.0 / _symbolicLbOfLb[i] * ( 1 - coeff ); + double factor = -2.0 / _symbolicLbOfLb[i] * coeffs[0]; for ( unsigned j = 0; j < _inputLayerSize; ++j ) _symbolicUb[j * _size + i] *= factor; @@ -3754,9 +3767,9 @@ void Layer::computeParameterisedSymbolicBoundsForSign( double coeff ) else { // The lower bound's phase is not fixed, use parameterised - // parallelogram approximation: y >= 2 / u * ( 1 - coeff ) x - 1 + // parallelogram approximation: y >= 2 / u * coeffs[1] * x - 1 // (varies continuously between y >= -1 and y >= 2 / u * x - 1). 
- double factor = 2.0 / _symbolicUbOfUb[i] * ( 1 - coeff ); + double factor = 2.0 / _symbolicUbOfUb[i] * coeffs[1]; for ( unsigned j = 0; j < _inputLayerSize; ++j ) { @@ -3823,22 +3836,29 @@ void Layer::computeParameterisedSymbolicBoundsForSign( double coeff ) if ( _lb[i] < _symbolicLbOfLb[i] ) { _lb[i] = _symbolicLbOfLb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + + if ( receive ) + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } if ( _ub[i] > _symbolicUbOfUb[i] ) { _ub[i] = _symbolicUbOfUb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + + if ( receive ) + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); } } } -void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ) +void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( std::vector coeffs, + bool receive ) { ASSERT( _alpha > 0 && _alpha < 1 ); + ASSERT( coeffs.size() == 1 ); + double coeff = coeffs[0]; ASSERT( coeff >= 0 && coeff <= 1 ); std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); @@ -4060,21 +4080,30 @@ void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ) if ( _lb[i] < _symbolicLbOfLb[i] ) { _lb[i] = _symbolicLbOfLb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + + if ( receive ) + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } if ( _ub[i] > _symbolicUbOfUb[i] ) { _ub[i] = _symbolicUbOfUb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + + if ( receive ) + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); } } } -void Layer::computeParameterisedSymbolicBoundsForBilinear( double coeff ) +void 
Layer::computeParameterisedSymbolicBoundsForBilinear( std::vector coeffs, + bool receive ) { + ASSERT( coeffs.size() == 2 ); + ASSERT( coeffs[0] >= 0 && coeffs[0] <= 1 ); + ASSERT( coeffs[1] >= 0 && coeffs[1] <= 1 ); + std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); std::fill_n( _symbolicUb, _size * _inputLayerSize, 0 ); @@ -4175,11 +4204,10 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( double coeff ) // b_u = alpha2 * l_x + ( 1 - alpha2 ) * u_x // c_u = -alpha2 * -l_x * u_y - ( 1 - alpha2 ) * u_x * l_y - // Here assume alpha1 = alpha2 = coeff (and b_l = b_u). - - double a_l = coeff * sourceLbs[1] + ( 1 - coeff ) * sourceUbs[1]; - double a_u = coeff * sourceUbs[1] + ( 1 - coeff ) * sourceLbs[1]; - double b = coeff * sourceLbs[0] + ( 1 - coeff ) * sourceUbs[0]; + double a_l = coeffs[0] * sourceLbs[1] + ( 1 - coeffs[0] ) * sourceUbs[1]; + double a_u = coeffs[1] * sourceUbs[1] + ( 1 - coeffs[1] ) * sourceLbs[1]; + double b_l = coeffs[0] * sourceLbs[0] + ( 1 - coeffs[0] ) * sourceUbs[0]; + double b_u = coeffs[1] * sourceLbs[0] + ( 1 - coeffs[1] ) * sourceUbs[0]; for ( unsigned j = 0; j < _inputLayerSize; ++j ) { @@ -4198,25 +4226,32 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( double coeff ) } else { - _symbolicLb[j * _size + i] += a_u * sourceSymbolicLb[j * sourceLayerSize + indexA]; + _symbolicUb[j * _size + i] += a_u * sourceSymbolicLb[j * sourceLayerSize + indexA]; + } + + if ( b_l >= 0 ) + { + _symbolicLb[j * _size + i] += b_l * sourceSymbolicLb[j * sourceLayerSize + indexB]; + } + else + { + _symbolicLb[j * _size + i] += b_l * sourceSymbolicUb[j * sourceLayerSize + indexB]; } - if ( b >= 0 ) + if ( b_l >= 0 ) { - _symbolicLb[j * _size + i] += b * sourceSymbolicLb[j * sourceLayerSize + indexB]; - _symbolicUb[j * _size + i] += b * sourceSymbolicUb[j * sourceLayerSize + indexB]; + _symbolicUb[j * _size + i] += b_u * sourceSymbolicUb[j * sourceLayerSize + indexB]; } else { - _symbolicLb[j * _size + i] += b * sourceSymbolicUb[j 
* sourceLayerSize + indexB]; - _symbolicUb[j * _size + i] += b * sourceSymbolicLb[j * sourceLayerSize + indexB]; + _symbolicUb[j * _size + i] += b_u * sourceSymbolicLb[j * sourceLayerSize + indexB]; } } - _symbolicLowerBias[i] = - -coeff * sourceLbs[0] * sourceLbs[1] - ( 1 - coeff ) * sourceUbs[0] * sourceUbs[1]; - _symbolicUpperBias[i] = - -coeff * sourceLbs[0] * sourceUbs[1] - ( 1 - coeff ) * sourceUbs[0] * sourceLbs[1]; + _symbolicLowerBias[i] = -coeffs[0] * sourceLbs[0] * sourceLbs[1] - + ( 1 - coeffs[0] ) * sourceUbs[0] * sourceUbs[1]; + _symbolicUpperBias[i] = -coeffs[1] * sourceLbs[0] * sourceUbs[1] - + ( 1 - coeffs[1] ) * sourceUbs[0] * sourceLbs[1]; double lb = FloatUtils::infinity(); double ub = FloatUtils::negativeInfinity(); @@ -4283,23 +4318,23 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( double coeff ) if ( _lb[i] < _symbolicLbOfLb[i] ) { _lb[i] = _symbolicLbOfLb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); + + if ( receive ) + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); } if ( _ub[i] > _symbolicUbOfUb[i] ) { _ub[i] = _symbolicUbOfUb[i]; - _layerOwner->receiveTighterBound( - Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + + if ( receive ) + _layerOwner->receiveTighterBound( + Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); } } } -void Layer::computeParameterisedSymbolicBoundsForSigmoid( double coeff ) -{ -} - double Layer::softmaxLSELowerBound( const Vector &inputs, const Vector &inputLbs, const Vector &inputUbs, diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h index c73cdb3e18..fef75a1f8a 100644 --- a/src/nlr/Layer.h +++ b/src/nlr/Layer.h @@ -137,7 +137,9 @@ class Layer void obtainCurrentBounds( const Query &inputQuery ); void obtainCurrentBounds(); void computeSymbolicBounds(); - void computeParameterisedSymbolicBounds( double coeff, bool receive = false ); + void 
computeParameterisedSymbolicBounds( std::vector coeffs, + bool receivePolygonal = false, + bool receive = false ); double calculateDifferenceFromSymbolic( Map &point, unsigned i ) const; void computeIntervalArithmeticBounds(); @@ -298,11 +300,10 @@ class Layer /* Helper functions for parameterised symbolic bound tightening */ - void computeParameterisedSymbolicBoundsForRelu( double coeff ); - void computeParameterisedSymbolicBoundsForSign( double coeff ); - void computeParameterisedSymbolicBoundsForLeakyRelu( double coeff ); - void computeParameterisedSymbolicBoundsForBilinear( double coeff ); - void computeParameterisedSymbolicBoundsForSigmoid( double coeff ); + void computeParameterisedSymbolicBoundsForRelu( std::vector coeffs, bool receive ); + void computeParameterisedSymbolicBoundsForSign( std::vector coeffs, bool receive ); + void computeParameterisedSymbolicBoundsForLeakyRelu( std::vector coeffs, bool receive ); + void computeParameterisedSymbolicBoundsForBilinear( std::vector coeffs, bool receive ); /* Helper functions for interval bound tightening diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index 2930770c6d..8b82ae140b 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -221,10 +221,17 @@ void NetworkLevelReasoner::symbolicBoundPropagation() _layerIndexToLayer[i]->computeSymbolicBounds(); } -void NetworkLevelReasoner::parameterisedSymbolicBoundPropagation( Map coeffs ) +void NetworkLevelReasoner::parameterisedSymbolicBoundPropagation( std::vector coeffs ) { - for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) - _layerIndexToLayer[i]->computeParameterisedSymbolicBounds( coeffs[i] ); + Map> layerIndicesToParameters = + LPFormulator::getParametersForLayers( _layerIndexToLayer, coeffs ); + for ( auto pair : _layerIndexToLayer ) + { + unsigned layerIndex = pair.first; + Layer *layer = _layerIndexToLayer[layerIndex]; + std::vector currentLayerCoeffs = 
layerIndicesToParameters[layerIndex]; + layer->computeParameterisedSymbolicBounds( currentLayerCoeffs ); + } } void NetworkLevelReasoner::deepPolyPropagation() diff --git a/src/nlr/NetworkLevelReasoner.h b/src/nlr/NetworkLevelReasoner.h index f6bf6cf72f..45e2f59997 100644 --- a/src/nlr/NetworkLevelReasoner.h +++ b/src/nlr/NetworkLevelReasoner.h @@ -137,7 +137,7 @@ class NetworkLevelReasoner : public LayerOwner void obtainCurrentBounds(); void intervalArithmeticBoundPropagation(); void symbolicBoundPropagation(); - void parameterisedSymbolicBoundPropagation( Map coeffs ); + void parameterisedSymbolicBoundPropagation( std::vector coeffs ); void deepPolyPropagation(); void lpRelaxationPropagation(); void LPTighteningForOneLayer( unsigned targetIndex ); From 7d4a5a4cbd9e63dc77ebd6672b236f234894f47d Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 16 Jan 2025 20:33:12 +0200 Subject: [PATCH 45/72] Add files via upload --- src/nlr/tests/Test_NetworkLevelReasoner.h | 605 +++++++++------------- 1 file changed, 242 insertions(+), 363 deletions(-) diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index 91bd047687..b23deca8cf 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -12203,7 +12203,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), } ); - List bounds; + List bounds, newBounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); @@ -12214,37 +12214,12 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds2( { - // From parameterised SBT: - Tightening( 2, -1, Tightening::LB ), - Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), - Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 1, 
Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), - Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.5, Tightening::LB ), - Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), - Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, 0.38, Tightening::UB ), - - // From parameterised LP: - Tightening( 8, 0, Tightening::LB ), - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1.625, Tightening::LB ), } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); @@ -12308,34 +12283,11 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds4( { - // From parameterised SBT: - Tightening( 2, -5, Tightening::LB ), - Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), - Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, 2, Tightening::UB ), - Tightening( 5, 3, Tightening::UB ), - - Tightening( 7, 4, Tightening::UB ), - Tightening( 8, 2, Tightening::UB ), - - Tightening( 9, 4, Tightening::UB ), - Tightening( 10, 0.38, Tightening::UB ), - - // From parameterised LP: - Tightening( 8, 0, Tightening::LB ), - Tightening( 9, 0, Tightening::LB ), - - Tightening( 11, -0.93, Tightening::LB ), - Tightening( 10, 2, Tightening::UB ), + Tightening( 11, 0.8472, Tightening::LB ), } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, 
expectedBounds4 ) ); } @@ -12390,7 +12342,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), } ); - List bounds; + List bounds, newBounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); @@ -12401,27 +12353,19 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds2( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 12, -1.75, Tightening::LB ), + Tightening( 13, -4.25, Tightening::LB ), + Tightening( 13, 3.25, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 17, -11.1417, Tightening::LB ), + Tightening( 17, 10, Tightening::UB ), - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), - Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + Tightening( 21, -17.3084, Tightening::LB ), + Tightening( 21, 3.2160, Tightening::UB ), } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); @@ -12434,8 +12378,6 @@ class NetworkLevelReasonerTestSuite : public 
CxxTest::TestSuite tableau.setUpperBound( 2, 2 ); double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); tableau.setUpperBound( 3, large ); tableau.setLowerBound( 4, -large ); @@ -12518,27 +12460,21 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds4( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 11, -5, Tightening::LB ), + Tightening( 12, -4.6429, Tightening::LB ), + Tightening( 13, 8.5519, Tightening::UB ), - Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 17, -23.6231, Tightening::LB ), + Tightening( 17, 14.0909, Tightening::UB ), + Tightening( 18, 2, Tightening::LB ), + Tightening( 18, 28.2015, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), - Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + Tightening( 21, -29.2015, Tightening::LB ), + Tightening( 21, 6.5734, Tightening::UB ), } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } @@ -12580,7 +12516,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 
Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), } ); - List bounds; + List bounds, newBounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); @@ -12591,27 +12527,17 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds2( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1.8, Tightening::LB ), + Tightening( 10, 0, Tightening::UB ), - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), - Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + Tightening( 11, 1.4542, Tightening::LB ), + Tightening( 11, 1.4542, Tightening::LB ), } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); @@ -12675,27 +12601,22 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds4( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), 
Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 6, -2, Tightening::LB ), + Tightening( 6, 3.1, Tightening::UB ), + Tightening( 7, -3.2, Tightening::LB ), + Tightening( 7, 4, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 8, 3.1, Tightening::UB ), + Tightening( 9, -0.32, Tightening::LB ), - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), - Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + Tightening( 10, -3.8726, Tightening::LB ), + Tightening( 10, 0.03, Tightening::UB ), + Tightening( 11, 0.4074, Tightening::LB ), + Tightening( 11, 11.3243, Tightening::UB ), } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } @@ -12723,23 +12644,34 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB 
), - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), + Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), + Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), - Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), + Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), + Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), + Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), + + Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), + Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), + + Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), } ); - List bounds; + List bounds, newBounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, 
expectedBounds ) ); @@ -12750,27 +12682,32 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds2( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), - Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + Tightening( 11, -4.5, Tightening::LB ), + Tightening( 11, 8.5, Tightening::UB ), + Tightening( 12, -2.225, Tightening::LB ), + Tightening( 13, 2.975, Tightening::UB ), + Tightening( 13, -4.175, Tightening::LB ), + + Tightening( 15, -0.2225, Tightening::LB ), + Tightening( 16, 2.975, Tightening::UB ), + Tightening( 16, -0.4175, Tightening::LB ), + + Tightening( 17, -11.452, Tightening::LB ), + Tightening( 17, 10.18, Tightening::UB ), + Tightening( 18, 0.87, Tightening::LB ), + Tightening( 18, 16.0688, Tightening::UB ), + + Tightening( 19, -1.1452, Tightening::LB ), + Tightening( 19, 10.18, Tightening::UB ), + Tightening( 20, 0.87, Tightening::LB ), + Tightening( 20, 16.0688, Tightening::UB ), + + Tightening( 21, -17.0684, Tightening::LB ), + Tightening( 21, 3.6767, Tightening::UB ), } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( 
bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); @@ -12783,8 +12720,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 2, 2 ); double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); tableau.setUpperBound( 3, large ); tableau.setLowerBound( 4, -large ); @@ -12803,6 +12738,26 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 10, large ); tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); // Invoke DeepPoly @@ -12810,20 +12765,31 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, 
Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), - Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), + Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), + Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), - Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), - Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), + Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), + Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), - Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), - Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), + Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), + + Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), + Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), + + Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( 
bounds ) ); @@ -12836,27 +12802,32 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds4( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), - Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + Tightening( 11, -5.6, Tightening::LB ), + Tightening( 11, 16.4636, Tightening::UB ), + Tightening( 12, -6.0286, Tightening::LB ), + Tightening( 13, -5.9, Tightening::LB ), + Tightening( 13, 8.0468, Tightening::UB ), + + Tightening( 15, -0.6029, Tightening::LB ), + Tightening( 16, -0.59, Tightening::LB ), + Tightening( 16, 8.0468, Tightening::UB ), + + Tightening( 17, -24.8864, Tightening::LB ), + Tightening( 17, 14.3076, Tightening::UB ), + Tightening( 18, 0.75, Tightening::LB ), + Tightening( 18, 28.0272, Tightening::UB ), + + Tightening( 19, -2.4886, Tightening::LB ), + Tightening( 19, 14.3076, Tightening::UB ), + Tightening( 20, 0.75, Tightening::LB ), + Tightening( 20, 28.0272, Tightening::UB ), + + Tightening( 21, -29.9648, Tightening::LB ), + Tightening( 21, 6.9619, Tightening::UB ), } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( 
bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } @@ -12898,7 +12869,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), } ); - List bounds; + List bounds, newBounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); @@ -12908,28 +12879,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - List expectedBounds2( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), - Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), - } ); + List expectedBounds2( {} ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); @@ -12992,28 +12945,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - List expectedBounds4( { - Tightening( 2, 
-1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), - Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), - } ); + List expectedBounds4( {} ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } @@ -13068,7 +13003,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), } ); - List bounds; + List bounds, newBounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); @@ -13078,28 +13013,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - List expectedBounds2( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, 
Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), - Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), - } ); + List expectedBounds2( {} ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); @@ -13112,8 +13029,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 2, 2 ); double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); tableau.setUpperBound( 3, large ); tableau.setLowerBound( 4, -large ); @@ -13195,28 +13110,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - List expectedBounds4( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), 
Tightening( 10, 0.25, Tightening::UB ), - Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), - } ); + List expectedBounds4( {} ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } @@ -13258,7 +13155,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ), } ); - List bounds; + List bounds, newBounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); @@ -13269,27 +13166,11 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds2( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), - Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), - - Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), - - Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), + Tightening( 11, -2.4149, Tightening::LB ), } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = 
removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); @@ -13327,20 +13208,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), - Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), - Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), - Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), - Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), - Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); @@ -13353,27 +13234,17 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite 
TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds4( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 5.8235, Tightening::UB ), - Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 10, -14, Tightening::LB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), - Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + Tightening( 11, -24.8805, Tightening::LB ), + Tightening( 11, 14, Tightening::UB ), } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } @@ -13420,7 +13291,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), } ); - List bounds; + List bounds, newBounds; TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); @@ -13431,27 +13302,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds2( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), 
Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 12, -1.75, Tightening::LB ), - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), - Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); @@ -13464,8 +13321,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 2, 2 ); double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); tableau.setLowerBound( 3, -large ); tableau.setUpperBound( 3, large ); tableau.setLowerBound( 4, -large ); @@ -13528,33 +13383,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds4( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -0.1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 11, -5, Tightening::LB ), + Tightening( 12, -4.6429, Tightening::LB ), - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, 
Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 0.25, Tightening::UB ), - Tightening( 11, 0.78, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), } ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - for ( auto &bound : bounds ) - { - bound.dump(); - } + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } bool boundsEqual( const List &bounds, const List &expectedBounds ) { - if ( bounds.size() != expectedBounds.size() ) + if ( bounds.size() < expectedBounds.size() ) return false; bool allFound = true; @@ -13573,6 +13415,43 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite return allFound; } + + // Create list of all tightenings in newBounds for which there is no bound in newBounds or in + // bounds which is at least as tight. 
+ List removeRedundancies( const List &bounds, + const List &newBounds ) + { + List minimalBounds; + + for ( const auto &newBound : newBounds ) + { + bool foundTighter = false; + for ( const auto &bound : bounds ) + { + foundTighter |= + ( newBound._type == bound._type && newBound._variable == bound._variable && + ( ( newBound._type == Tightening::LB && + FloatUtils::lte( newBound._value, bound._value, 0.0001 ) ) || + ( newBound._type == Tightening::UB && + FloatUtils::gte( newBound._value, bound._value, 0.0001 ) ) ) ); + } + + for ( const auto &bound : newBounds ) + { + foundTighter |= + ( newBound._type == bound._type && newBound._variable == bound._variable && + ( ( newBound._type == Tightening::LB && + FloatUtils::lt( newBound._value, bound._value, 0.0001 ) ) || + ( newBound._type == Tightening::UB && + FloatUtils::gt( newBound._value, bound._value, 0.0001 ) ) ) ); + } + + if ( !foundTighter ) + minimalBounds.append( newBound ); + } + return minimalBounds; + } + void updateTableau( MockTableau &tableau, List &tightenings ) { ASSERT( tableau ); From 03a74a7396dd18dafcbaebd9347b59a6b6e3601f Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Mon, 3 Feb 2025 13:24:27 +0200 Subject: [PATCH 46/72] Delete src/nlr/AdamOptimizer.h --- src/nlr/AdamOptimizer.h | 399 ---------------------------------------- 1 file changed, 399 deletions(-) delete mode 100644 src/nlr/AdamOptimizer.h diff --git a/src/nlr/AdamOptimizer.h b/src/nlr/AdamOptimizer.h deleted file mode 100644 index 018cd34325..0000000000 --- a/src/nlr/AdamOptimizer.h +++ /dev/null @@ -1,399 +0,0 @@ -/* - * Copyright Nick Thompson, 2024 - * Use, modification and distribution are subject to the - * Boost Software License, Version 1.0. 
(See accompanying file - * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) - */ -#ifndef ADAM_OPTIMIZER -#define ADAM_OPTIMIZER - -#include "FloatUtils.h" -#include "GlobalConfiguration.h" - -#include // for std::sort -#include -#include -#include -#include -#include -#include -#include -#include // for std::false_type -#include -#include - -namespace NLR { - -template struct has_resize : std::false_type -{ -}; - -template -struct has_resize().resize( size_t{} ) )>> : std::true_type -{ -}; - -template constexpr bool has_resize_v = has_resize::value; - -template -void validate_bounds( ArgumentContainer const &lower_bounds, ArgumentContainer const &upper_bounds ) -{ - std::ostringstream oss; - - if ( lower_bounds.size() == 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The dimension of the problem cannot be zero."; - throw std::domain_error( oss.str() ); - } - - if ( upper_bounds.size() != lower_bounds.size() ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be the same number of lower bounds as upper bounds, but given "; - oss << upper_bounds.size() << " upper bounds, and " << lower_bounds.size() - << " lower bounds."; - throw std::domain_error( oss.str() ); - } - - for ( size_t i = 0; i < lower_bounds.size(); ++i ) - { - auto lb = lower_bounds[i]; - auto ub = upper_bounds[i]; - if ( lb > ub ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The upper bound must be greater than or equal to the lower bound, but the " - "upper bound is " - << ub << " and the lower is " << lb << "."; - throw std::domain_error( oss.str() ); - } - - if ( !FloatUtils::isFinite( lb ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The lower bound must be finite, but got " << lb << "."; - oss << " For infinite bounds, emulate with std::numeric_limits::lower() or use a " - "standard infinite->finite " - "transform."; - throw std::domain_error( oss.str() ); - } 
- - if ( !FloatUtils::isFinite( ub ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The upper bound must be finite, but got " << ub << "."; - oss << " For infinite bounds, emulate with std::numeric_limits::max() or use a " - "standard infinite->finite " - "transform."; - throw std::domain_error( oss.str() ); - } - } -} - -template -std::vector random_initial_population( ArgumentContainer const &lower_bounds, - ArgumentContainer const &upper_bounds, - size_t initial_population_size, - URBG &&gen ) -{ - using Real = typename ArgumentContainer::value_type; - using DimensionlessReal = decltype( Real() / Real() ); - constexpr bool has_resize = has_resize_v; - std::vector population( initial_population_size ); - auto const dimension = lower_bounds.size(); - - for ( size_t i = 0; i < population.size(); ++i ) - { - if constexpr ( has_resize ) - population[i].resize( dimension ); - else - { - // Argument type must be known at compile-time; like std::array: - if ( population[i].size() != dimension ) - { - std::ostringstream oss; - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": For containers which do not have resize, the default size must be the " - "same as the dimension, "; - oss << "but the default container size is " << population[i].size() - << " and the dimension of the problem is " << dimension << "."; - oss << " The function argument container type is " - << typeid( ArgumentContainer ).name() << ".\n"; - throw std::runtime_error( oss.str() ); - } - } - } - - // Why don't we provide an option to initialize with (say) a Gaussian distribution? - // > If the optimum's location is fairly well known, - // > a Gaussian distribution may prove somewhat faster, although it - // > may also increase the probability that the population will converge prematurely. - // > In general, uniform distributions are preferred, since they best reflect - // > the lack of knowledge about the optimum's location. 
- // - Differential Evolution: A Practical Approach to Global Optimization - // That said, scipy uses Latin Hypercube sampling and says self-avoiding sequences are - // preferable. So this is something that could be investigated and potentially improved. - std::uniform_real_distribution dis( DimensionlessReal( 0 ), - DimensionlessReal( 1 ) ); - for ( size_t i = 0; i < population.size(); ++i ) - { - for ( size_t j = 0; j < dimension; ++j ) - { - auto const &lb = lower_bounds[j]; - auto const &ub = upper_bounds[j]; - population[i][j] = lb + dis( gen ) * ( ub - lb ); - } - } - - return population; -} - -template -void validate_initial_guess( ArgumentContainer const &initial_guess, - ArgumentContainer const &lower_bounds, - ArgumentContainer const &upper_bounds ) -{ - std::ostringstream oss; - auto const dimension = lower_bounds.size(); - - if ( initial_guess.size() != dimension ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The initial guess must have the same dimensions as the problem,"; - oss << ", but the problem size is " << dimension << " and the initial guess has " - << initial_guess.size() << " elements."; - throw std::domain_error( oss.str() ); - } - - for ( size_t i = 0; i < dimension; ++i ) - { - auto lb = lower_bounds[i]; - auto ub = upper_bounds[i]; - - if ( !FloatUtils::isFinite( initial_guess[i] ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": At index " << i << ", the initial guess is " << initial_guess[i] - << ", make sure all elements of the initial guess are finite."; - throw std::domain_error( oss.str() ); - } - - if ( initial_guess[i] < lb || initial_guess[i] > ub ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": At index " << i << " the initial guess " << initial_guess[i] - << " is not in the bounds [" << lb << ", " << ub << "]."; - throw std::domain_error( oss.str() ); - } - } -} - -// Single-Threaded Adam Optimizer - -// We provide the parameters in a struct-there 
are too many of them and they are too unwieldy to -// pass individually: -template struct adam_optimizer_parameters -{ - using Real = typename ArgumentContainer::value_type; - ArgumentContainer lower_bounds; - ArgumentContainer upper_bounds; - - Real step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; - Real lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; - Real epsilon = GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS; - Real beta1 = 0.9; - Real beta2 = 0.999; - Real weight_decay = 0; - bool maximize = false; - size_t max_iterations = 100; - ArgumentContainer const *initial_guess = nullptr; -}; - -template -void validate_adam_optimizer_parameters( - adam_optimizer_parameters const &cd_params ) -{ - std::ostringstream oss; - validate_bounds( cd_params.lower_bounds, cd_params.upper_bounds ); - - if ( FloatUtils::isNan( cd_params.step_size ) || cd_params.step_size <= 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": step_size > 0 is required, but got step_size=" << cd_params.step_size << "."; - throw std::domain_error( oss.str() ); - } - - if ( FloatUtils::isNan( cd_params.epsilon ) || cd_params.epsilon <= 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": epsilon > 0 is required, but got epsilon=" << cd_params.epsilon << "."; - throw std::domain_error( oss.str() ); - } - - if ( FloatUtils::isNan( cd_params.beta1 ) || cd_params.beta1 <= 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": beta1 > 0 is required, but got beta1=" << cd_params.beta1 << "."; - throw std::domain_error( oss.str() ); - } - - if ( FloatUtils::isNan( cd_params.beta2 ) || cd_params.beta2 <= 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": beta2 > 0 is required, but got beta2=" << cd_params.beta2 << "."; - throw std::domain_error( oss.str() ); - } - - if ( FloatUtils::isNan( cd_params.lr ) || cd_params.lr <= 0 ) - { - oss << 
__FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": lr > 0 is required, but got lr=" << cd_params.lr << "."; - throw std::domain_error( oss.str() ); - } - - if ( FloatUtils::isNan( cd_params.weight_decay ) || cd_params.weight_decay < 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": weight_decay >= 0 is required, but got weight_decay=" << cd_params.weight_decay - << "."; - throw std::domain_error( oss.str() ); - } - - if ( cd_params.max_iterations < 1 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be at least one generation."; - throw std::invalid_argument( oss.str() ); - } - - if ( cd_params.initial_guess ) - { - validate_initial_guess( - *cd_params.initial_guess, cd_params.lower_bounds, cd_params.upper_bounds ); - } -} - -template -ArgumentContainer adam_optimizer( - const Func cost_function, - adam_optimizer_parameters const &cd_params, - URBG &gen, - std::invoke_result_t target_value, - bool *cancellation = nullptr, - std::vector>> - *queries = nullptr, - std::invoke_result_t *current_minimum_cost = nullptr ) -{ - using ResultType = std::invoke_result_t; - - validate_adam_optimizer_parameters( cd_params ); - const size_t dimension = cd_params.lower_bounds.size(); - auto step_size = cd_params.step_size; - auto max_iterations = cd_params.max_iterations; - auto maximize = cd_params.maximize; - auto epsilon = cd_params.epsilon; - auto beta1 = cd_params.beta1; - auto beta2 = cd_params.beta2; - auto lower_bounds = cd_params.lower_bounds; - auto upper_bounds = cd_params.upper_bounds; - auto lr = cd_params.lr; - auto weight_decay = cd_params.weight_decay; - auto guess = - random_initial_population( cd_params.lower_bounds, cd_params.upper_bounds, 1, gen ); - - if ( cd_params.initial_guess ) - { - guess[0] = *cd_params.initial_guess; - } - - std::vector candidates( dimension ); - std::vector gradient( dimension ); - std::vector first_moment( dimension ); - std::vector second_moment( dimension ); - 
ArgumentContainer current_minimum = guess[0]; - - for ( size_t j = 0; j < dimension; ++j ) - { - first_moment[j] = 0; - second_moment[j] = 0; - } - - for ( size_t i = 0; i < max_iterations; ++i ) - { - double current_cost = cost_function( guess[0] ); - std::cout << " current_cost = " << current_cost << std::endl; - for ( size_t j = 0; j < dimension; ++j ) - { - candidates[j] = guess[0]; - candidates[j][j] += step_size; - - if ( candidates[j][j] > upper_bounds[j] || candidates[j][j] < lower_bounds[j] ) - { - gradient[j] = 0; - continue; - } - - size_t sign = ( maximize == false ? 1 : -1 ); - double cost = cost_function( candidates[j] ); - - for ( size_t k = 0; k < dimension; ++k ) - { - std::cout << "candidates[" << j << "][" << k << "] = " << candidates[j][k] - << std::endl; - std::cout << "guess[0][" << k << "] = " << guess[0][k] << std::endl; - } - - std::cout << " cost = " << cost << std::endl; - gradient[j] = sign * ( cost - current_cost ) / step_size + weight_decay * guess[0][j]; - - if ( !FloatUtils::isNan( target_value ) && cost <= target_value ) - { - guess[0] = candidates[j]; - break; - } - } - - bool gradient_is_zero = true; - for ( size_t j = 0; j < dimension; ++j ) - { - std::cout << "gradient[" << j << "] = " << gradient[j] << std::endl; - if ( FloatUtils::abs( gradient[j] ) > epsilon ) - { - gradient_is_zero = false; - } - } - - if ( gradient_is_zero ) - { - break; - } - - for ( size_t j = 0; j < dimension; ++j ) - { - first_moment[j] = beta1 * first_moment[j] + ( 1 - beta1 ) * gradient[j]; - first_moment[j] /= ( 1 - std::pow( beta1, step_size ) ); - - second_moment[j] = beta2 * first_moment[j] + ( 1 - beta2 ) * std::pow( gradient[j], 2 ); - second_moment[j] /= ( 1 - std::pow( beta2, step_size ) ); - - guess[0][j] -= lr * first_moment[j] / ( std::sqrt( second_moment[j] ) + epsilon ); - - - if ( guess[0][j] > upper_bounds[j] ) - { - guess[0][j] = upper_bounds[j]; - } - - if ( guess[0][j] < lower_bounds[j] ) - { - guess[0][j] = lower_bounds[j]; - } - } 
- } - - return guess[0]; -} - -} // namespace NLR -#endif From 6ac31b9a0a14e8fd45ff311d364e59ace9ca2734 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Mon, 3 Feb 2025 13:24:50 +0200 Subject: [PATCH 47/72] Delete src/nlr/CoordinateDescent.h --- src/nlr/CoordinateDescent.h | 308 ------------------------------------ 1 file changed, 308 deletions(-) delete mode 100644 src/nlr/CoordinateDescent.h diff --git a/src/nlr/CoordinateDescent.h b/src/nlr/CoordinateDescent.h deleted file mode 100644 index f73d909c7f..0000000000 --- a/src/nlr/CoordinateDescent.h +++ /dev/null @@ -1,308 +0,0 @@ -/* - * Copyright Nick Thompson, 2024 - * Use, modification and distribution are subject to the - * Boost Software License, Version 1.0. (See accompanying file - * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) - */ -#ifndef COORDINATE_DESCENT -#define COORDINATE_DESCENT - -#include "FloatUtils.h" -#include "GlobalConfiguration.h" - -#include // for std::sort -#include -#include -#include -#include -#include -#include -#include // for std::false_type -#include -#include - -namespace NLR { - -template struct has_resize : std::false_type -{ -}; - -template -struct has_resize().resize( size_t{} ) )>> : std::true_type -{ -}; - -template constexpr bool has_resize_v = has_resize::value; - -template -void validate_bounds( ArgumentContainer const &lower_bounds, ArgumentContainer const &upper_bounds ) -{ - std::ostringstream oss; - - if ( lower_bounds.size() == 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The dimension of the problem cannot be zero."; - throw std::domain_error( oss.str() ); - } - - if ( upper_bounds.size() != lower_bounds.size() ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be the same number of lower bounds as upper bounds, but given "; - oss << upper_bounds.size() << " upper bounds, and " << lower_bounds.size() - << " lower bounds."; - throw std::domain_error( oss.str() ); - } - - for ( 
size_t i = 0; i < lower_bounds.size(); ++i ) - { - auto lb = lower_bounds[i]; - auto ub = upper_bounds[i]; - if ( lb > ub ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The upper bound must be greater than or equal to the lower bound, but the " - "upper bound is " - << ub << " and the lower is " << lb << "."; - throw std::domain_error( oss.str() ); - } - - if ( !FloatUtils::isFinite( lb ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The lower bound must be finite, but got " << lb << "."; - oss << " For infinite bounds, emulate with std::numeric_limits::lower() or use a " - "standard infinite->finite " - "transform."; - throw std::domain_error( oss.str() ); - } - - if ( !FloatUtils::isFinite( ub ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The upper bound must be finite, but got " << ub << "."; - oss << " For infinite bounds, emulate with std::numeric_limits::max() or use a " - "standard infinite->finite " - "transform."; - throw std::domain_error( oss.str() ); - } - } -} - -template -std::vector random_initial_population( ArgumentContainer const &lower_bounds, - ArgumentContainer const &upper_bounds, - size_t initial_population_size, - URBG &&gen ) -{ - using Real = typename ArgumentContainer::value_type; - using DimensionlessReal = decltype( Real() / Real() ); - constexpr bool has_resize = has_resize_v; - std::vector population( initial_population_size ); - auto const dimension = lower_bounds.size(); - - for ( size_t i = 0; i < population.size(); ++i ) - { - if constexpr ( has_resize ) - population[i].resize( dimension ); - else - { - // Argument type must be known at compile-time; like std::array: - if ( population[i].size() != dimension ) - { - std::ostringstream oss; - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": For containers which do not have resize, the default size must be the " - "same as the dimension, "; - oss << "but the default container 
size is " << population[i].size() - << " and the dimension of the problem is " << dimension << "."; - oss << " The function argument container type is " - << typeid( ArgumentContainer ).name() << ".\n"; - throw std::runtime_error( oss.str() ); - } - } - } - - // Why don't we provide an option to initialize with (say) a Gaussian distribution? - // > If the optimum's location is fairly well known, - // > a Gaussian distribution may prove somewhat faster, although it - // > may also increase the probability that the population will converge prematurely. - // > In general, uniform distributions are preferred, since they best reflect - // > the lack of knowledge about the optimum's location. - // - Differential Evolution: A Practical Approach to Global Optimization - // That said, scipy uses Latin Hypercube sampling and says self-avoiding sequences are - // preferable. So this is something that could be investigated and potentially improved. - std::uniform_real_distribution dis( DimensionlessReal( 0 ), - DimensionlessReal( 1 ) ); - for ( size_t i = 0; i < population.size(); ++i ) - { - for ( size_t j = 0; j < dimension; ++j ) - { - auto const &lb = lower_bounds[j]; - auto const &ub = upper_bounds[j]; - population[i][j] = lb + dis( gen ) * ( ub - lb ); - } - } - - return population; -} - -template -void validate_initial_guess( ArgumentContainer const &initial_guess, - ArgumentContainer const &lower_bounds, - ArgumentContainer const &upper_bounds ) -{ - std::ostringstream oss; - auto const dimension = lower_bounds.size(); - - if ( initial_guess.size() != dimension ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The initial guess must have the same dimensions as the problem,"; - oss << ", but the problem size is " << dimension << " and the initial guess has " - << initial_guess.size() << " elements."; - throw std::domain_error( oss.str() ); - } - - for ( size_t i = 0; i < dimension; ++i ) - { - auto lb = lower_bounds[i]; - auto ub = 
upper_bounds[i]; - - if ( !FloatUtils::isFinite( initial_guess[i] ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": At index " << i << ", the initial guess is " << initial_guess[i] - << ", make sure all elements of the initial guess are finite."; - throw std::domain_error( oss.str() ); - } - - if ( initial_guess[i] < lb || initial_guess[i] > ub ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": At index " << i << " the initial guess " << initial_guess[i] - << " is not in the bounds [" << lb << ", " << ub << "]."; - throw std::domain_error( oss.str() ); - } - } -} - -// Single-Threaded Coordinate Descent - -// We provide the parameters in a struct-there are too many of them and they are too unwieldy to -// pass individually: -template struct coordinate_descent_parameters -{ - using Real = typename ArgumentContainer::value_type; - ArgumentContainer lower_bounds; - ArgumentContainer upper_bounds; - - Real step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; - // size_t max_iterations = - // GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; - size_t max_iterations = 2; - ArgumentContainer const *initial_guess = nullptr; -}; - -template -void validate_coordinate_descent_parameters( - coordinate_descent_parameters const &cd_params ) -{ - std::ostringstream oss; - validate_bounds( cd_params.lower_bounds, cd_params.upper_bounds ); - - if ( FloatUtils::isNan( cd_params.step_size ) || cd_params.step_size <= 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": step_size > 0 is required, but got step_size=" << cd_params.step_size << "."; - throw std::domain_error( oss.str() ); - } - - if ( cd_params.max_iterations < 1 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be at least one generation."; - throw std::invalid_argument( oss.str() ); - } - - if ( cd_params.initial_guess ) - { - validate_initial_guess( - 
*cd_params.initial_guess, cd_params.lower_bounds, cd_params.upper_bounds ); - } -} - -template -ArgumentContainer coordinate_descent( - const Func cost_function, - coordinate_descent_parameters const &cd_params, - URBG &gen, - std::invoke_result_t target_value, - bool *cancellation = nullptr, - std::vector>> - *queries = nullptr, - std::invoke_result_t *current_minimum_cost = nullptr ) -{ - using ResultType = std::invoke_result_t; - - validate_coordinate_descent_parameters( cd_params ); - const size_t dimension = cd_params.lower_bounds.size(); - const size_t candidates_number = 2 * dimension; - auto step_size = cd_params.step_size; - auto max_iterations = cd_params.max_iterations; - auto lower_bounds = cd_params.lower_bounds; - auto upper_bounds = cd_params.upper_bounds; - auto guess = - random_initial_population( cd_params.lower_bounds, cd_params.upper_bounds, 1, gen ); - - if ( cd_params.initial_guess ) - { - guess[0] = *cd_params.initial_guess; - } - - std::vector candidates( candidates_number ); - std::vector costs( candidates_number ); - ArgumentContainer current_minimum = guess[0]; - - for ( size_t i = 0; i < max_iterations; ++i ) - { - for ( size_t j = 0; j < candidates_number; ++j ) - { - candidates[j] = guess[0]; - size_t index = j / 2; - size_t sign = ( j % 2 == 0 ? 
1 : -1 ); - candidates[j][index] += sign * step_size; - - if ( candidates[j][index] > upper_bounds[j] ) - { - candidates[j][index] = upper_bounds[j]; - } - - if ( candidates[j][index] < lower_bounds[j] ) - { - candidates[j][index] = lower_bounds[j]; - } - - costs[j] = cost_function( candidates[j] ); - - if ( current_minimum_cost && costs[j] < *current_minimum_cost ) - { - *current_minimum_cost = costs[j]; - current_minimum = candidates[j]; - } - - if ( !FloatUtils::isNan( target_value ) && costs[j] <= target_value ) - { - break; - } - } - - guess[0] = current_minimum; - } - - return guess[0]; -} - -} // namespace NLR -#endif From 756ca412740153a3d3b541e0512a0ab998461d2f Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Mon, 3 Feb 2025 13:25:25 +0200 Subject: [PATCH 48/72] Delete src/nlr/DifferentialEvolution.h --- src/nlr/DifferentialEvolution.h | 549 -------------------------------- 1 file changed, 549 deletions(-) delete mode 100644 src/nlr/DifferentialEvolution.h diff --git a/src/nlr/DifferentialEvolution.h b/src/nlr/DifferentialEvolution.h deleted file mode 100644 index eb27c0ef0e..0000000000 --- a/src/nlr/DifferentialEvolution.h +++ /dev/null @@ -1,549 +0,0 @@ -/* - * Copyright Nick Thompson, 2024 - * Use, modification and distribution are subject to the - * Boost Software License, Version 1.0. 
(See accompanying file - * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) - */ -#ifndef DIFFERENTIAL_EVOLUTION -#define DIFFERENTIAL_EVOLUTION - -#include "FloatUtils.h" - -#include // for std::sort -#include -#include -#include -#include -#include -#include -#include -#include -#include // for std::false_type -#include -#include - -namespace NLR { - -template struct has_resize : std::false_type -{ -}; - -template -struct has_resize().resize( size_t{} ) )>> : std::true_type -{ -}; - -template constexpr bool has_resize_v = has_resize::value; - -template -void validate_bounds( ArgumentContainer const &lower_bounds, ArgumentContainer const &upper_bounds ) -{ - std::ostringstream oss; - - if ( lower_bounds.size() == 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The dimension of the problem cannot be zero."; - throw std::domain_error( oss.str() ); - } - - if ( upper_bounds.size() != lower_bounds.size() ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be the same number of lower bounds as upper bounds, but given "; - oss << upper_bounds.size() << " upper bounds, and " << lower_bounds.size() - << " lower bounds."; - throw std::domain_error( oss.str() ); - } - - for ( size_t i = 0; i < lower_bounds.size(); ++i ) - { - auto lb = lower_bounds[i]; - auto ub = upper_bounds[i]; - if ( lb > ub ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The upper bound must be greater than or equal to the lower bound, but the " - "upper bound is " - << ub << " and the lower is " << lb << "."; - throw std::domain_error( oss.str() ); - } - - if ( !FloatUtils::isFinite( lb ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The lower bound must be finite, but got " << lb << "."; - oss << " For infinite bounds, emulate with std::numeric_limits::lower() or use a " - "standard infinite->finite " - "transform."; - throw std::domain_error( oss.str() ); - } - - if 
( !FloatUtils::isFinite( ub ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The upper bound must be finite, but got " << ub << "."; - oss << " For infinite bounds, emulate with std::numeric_limits::max() or use a " - "standard infinite->finite " - "transform."; - throw std::domain_error( oss.str() ); - } - } -} - -template -std::vector random_initial_population( ArgumentContainer const &lower_bounds, - ArgumentContainer const &upper_bounds, - size_t initial_population_size, - URBG &&gen ) -{ - using Real = typename ArgumentContainer::value_type; - using DimensionlessReal = decltype( Real() / Real() ); - constexpr bool has_resize = has_resize_v; - std::vector population( initial_population_size ); - auto const dimension = lower_bounds.size(); - - for ( size_t i = 0; i < population.size(); ++i ) - { - if constexpr ( has_resize ) - population[i].resize( dimension ); - else - { - // Argument type must be known at compile-time; like std::array: - if ( population[i].size() != dimension ) - { - std::ostringstream oss; - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": For containers which do not have resize, the default size must be the " - "same as the dimension, "; - oss << "but the default container size is " << population[i].size() - << " and the dimension of the problem is " << dimension << "."; - oss << " The function argument container type is " - << typeid( ArgumentContainer ).name() << ".\n"; - throw std::runtime_error( oss.str() ); - } - } - } - - // Why don't we provide an option to initialize with (say) a Gaussian distribution? - // > If the optimum's location is fairly well known, - // > a Gaussian distribution may prove somewhat faster, although it - // > may also increase the probability that the population will converge prematurely. - // > In general, uniform distributions are preferred, since they best reflect - // > the lack of knowledge about the optimum's location. 
- // - Differential Evolution: A Practical Approach to Global Optimization - // That said, scipy uses Latin Hypercube sampling and says self-avoiding sequences are - // preferable. So this is something that could be investigated and potentially improved. - std::uniform_real_distribution dis( DimensionlessReal( 0 ), - DimensionlessReal( 1 ) ); - for ( size_t i = 0; i < population.size(); ++i ) - { - for ( size_t j = 0; j < dimension; ++j ) - { - auto const &lb = lower_bounds[j]; - auto const &ub = upper_bounds[j]; - population[i][j] = lb + dis( gen ) * ( ub - lb ); - } - } - - return population; -} - -template -void validate_initial_guess( ArgumentContainer const &initial_guess, - ArgumentContainer const &lower_bounds, - ArgumentContainer const &upper_bounds ) -{ - std::ostringstream oss; - auto const dimension = lower_bounds.size(); - - if ( initial_guess.size() != dimension ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The initial guess must have the same dimensions as the problem,"; - oss << ", but the problem size is " << dimension << " and the initial guess has " - << initial_guess.size() << " elements."; - throw std::domain_error( oss.str() ); - } - - for ( size_t i = 0; i < dimension; ++i ) - { - auto lb = lower_bounds[i]; - auto ub = upper_bounds[i]; - - if ( !FloatUtils::isFinite( initial_guess[i] ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": At index " << i << ", the initial guess is " << initial_guess[i] - << ", make sure all elements of the initial guess are finite."; - throw std::domain_error( oss.str() ); - } - - if ( initial_guess[i] < lb || initial_guess[i] > ub ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": At index " << i << " the initial guess " << initial_guess[i] - << " is not in the bounds [" << lb << ", " << ub << "]."; - throw std::domain_error( oss.str() ); - } - } -} - -// Return indices corresponding to the minimum function values. 
-template -std::vector best_indices( std::vector const &function_values ) -{ - const size_t n = function_values.size(); - std::vector indices( n ); - - for ( size_t i = 0; i < n; ++i ) - { - indices[i] = i; - } - - std::sort( indices.begin(), indices.end(), [&]( size_t a, size_t b ) { - if ( FloatUtils::isNan( function_values[a] ) ) - { - return false; - } - if ( FloatUtils::isNan( function_values[b] ) ) - { - return true; - } - return function_values[a] < function_values[b]; - } ); - - return indices; -} - -template -auto weighted_lehmer_mean( RandomAccessContainer const &values, - RandomAccessContainer const &weights ) -{ - if ( values.size() != weights.size() ) - { - std::ostringstream oss; - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be the same number of weights as values, but got " << values.size() - << " values and " << weights.size() << " weights."; - throw std::logic_error( oss.str() ); - } - - if ( values.size() == 0 ) - { - std::ostringstream oss; - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must at least one value provided."; - throw std::logic_error( oss.str() ); - } - - using Real = typename RandomAccessContainer::value_type; - Real numerator = 0; - Real denominator = 0; - - for ( size_t i = 0; i < values.size(); ++i ) - { - if ( weights[i] < 0 || !FloatUtils::isFinite( weights[i] ) ) - { - std::ostringstream oss; - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": All weights must be positive and finite, but got received weight " - << weights[i] << " at index " << i << " of " << weights.size() << "."; - throw std::domain_error( oss.str() ); - } - - Real tmp = weights[i] * values[i]; - numerator += tmp * values[i]; - denominator += tmp; - } - return numerator / denominator; -} - -// Storn, R., Price, K. (1997). Differential evolution-a simple and efficient heuristic for global -// optimization over continuous spaces. Journal of global optimization, 11, 341-359. 
See: -// https://www.cp.eng.chula.ac.th/~prabhas//teaching/ec/ec2012/storn_price_de.pdf - -// We provide the parameters in a struct-there are too many of them and they are too unwieldy to -// pass individually: -template struct differential_evolution_parameters -{ - using Real = typename ArgumentContainer::value_type; - using DimensionlessReal = decltype( Real() / Real() ); - ArgumentContainer lower_bounds; - ArgumentContainer upper_bounds; - - // mutation factor is also called scale factor or just F in the literature: - DimensionlessReal mutation_factor = static_cast( 0.65 ); - DimensionlessReal crossover_probability = static_cast( 0.5 ); - - // Population in each generation: - size_t NP = 500; - size_t max_generations = 1000; - ArgumentContainer const *initial_guess = nullptr; - unsigned threads = std::thread::hardware_concurrency(); -}; - -template -void validate_differential_evolution_parameters( - differential_evolution_parameters const &de_params ) -{ - std::ostringstream oss; - validate_bounds( de_params.lower_bounds, de_params.upper_bounds ); - if ( de_params.NP < 4 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The population size must be at least 4, but requested population size of " - << de_params.NP << "."; - throw std::invalid_argument( oss.str() ); - } - - // From: "Differential Evolution: A Practical Approach to Global Optimization (Natural Computing - // Series)" > The scale factor, F in (0,1+), is a positive real number that controls the rate at - // which the population evolves. > While there is no upper limit on F, effective values are - // seldom greater than 1.0. - // ... - // Also see "Limits on F", Section 2.5.1: - // > This discontinuity at F = 1 reduces the number of mutants by half and can result in erratic - // convergence... 
- auto F = de_params.mutation_factor; - if ( FloatUtils::isNan( F ) || F >= 1 || F <= 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": F in (0, 1) is required, but got F=" << F << "."; - throw std::domain_error( oss.str() ); - } - - if ( de_params.max_generations < 1 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be at least one generation."; - throw std::invalid_argument( oss.str() ); - } - - if ( de_params.initial_guess ) - { - validate_initial_guess( - *de_params.initial_guess, de_params.lower_bounds, de_params.upper_bounds ); - } - - if ( de_params.threads == 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be at least one thread."; - throw std::invalid_argument( oss.str() ); - } -} - -template -ArgumentContainer differential_evolution( - const Func cost_function, - differential_evolution_parameters const &de_params, - URBG &gen, - std::invoke_result_t target_value = - std::numeric_limits>::quiet_NaN(), - std::atomic *cancellation = nullptr, - std::vector>> - *queries = nullptr, - std::atomic> *current_minimum_cost = nullptr ) -{ - using Real = typename ArgumentContainer::value_type; - using DimensionlessReal = decltype( Real() / Real() ); - using ResultType = std::invoke_result_t; - - validate_differential_evolution_parameters( de_params ); - const size_t dimension = de_params.lower_bounds.size(); - auto NP = de_params.NP; - auto population = - random_initial_population( de_params.lower_bounds, de_params.upper_bounds, NP, gen ); - - if ( de_params.initial_guess ) - { - population[0] = *de_params.initial_guess; - } - - std::vector cost( NP, std::numeric_limits::quiet_NaN() ); - std::atomic target_attained = false; - - // This mutex is only used if the queries are stored: - std::mutex mt; - - std::vector thread_pool; - auto const threads = de_params.threads; - for ( size_t j = 0; j < threads; ++j ) - { - // Note that if some members of the population take 
way longer to compute, - // then this parallelization strategy is very suboptimal. - // However, we tried using std::async (which should be robust to this particular problem), - // but the overhead was just totally unacceptable on ARM Macs (the only platform tested). - // As the economists say "there are no solutions, only tradeoffs". - - thread_pool.emplace_back( [&, j]() { - for ( size_t i = j; i < cost.size(); i += threads ) - { - cost[i] = cost_function( population[i] ); - if ( current_minimum_cost && cost[i] < *current_minimum_cost ) - { - *current_minimum_cost = cost[i]; - } - if ( queries ) - { - std::scoped_lock lock( mt ); - queries->push_back( std::make_pair( population[i], cost[i] ) ); - } - - if ( !FloatUtils::isNan( target_value ) && cost[i] <= target_value ) - { - target_attained = true; - } - } - } ); - } - - for ( auto &thread : thread_pool ) - { - thread.join(); - } - - std::vector trial_vectors( NP ); - for ( size_t i = 0; i < NP; ++i ) - { - if constexpr ( has_resize_v ) - { - trial_vectors[i].resize( dimension ); - } - } - - std::vector thread_generators( threads ); - for ( size_t j = 0; j < threads; ++j ) - { - thread_generators[j].seed( gen() ); - } - - // std::vector isn't threadsafe! 
- std::vector updated_indices( NP, 0 ); - - for ( size_t generation = 0; generation < de_params.max_generations; ++generation ) - { - if ( cancellation && *cancellation ) - { - break; - } - if ( target_attained ) - { - break; - } - - thread_pool.resize( 0 ); - for ( size_t j = 0; j < threads; ++j ) - { - thread_pool.emplace_back( [&, j]() { - auto &tlg = thread_generators[j]; - std::uniform_real_distribution unif01( DimensionlessReal( 0 ), - DimensionlessReal( 1 ) ); - - for ( size_t i = j; i < cost.size(); i += threads ) - { - if ( target_attained ) - { - return; - } - if ( cancellation && *cancellation ) - { - return; - } - - size_t r1, r2, r3; - do - { - r1 = tlg() % NP; - } - while ( r1 == i ); - - do - { - r2 = tlg() % NP; - } - while ( r2 == i || r2 == r1 ); - - do - { - r3 = tlg() % NP; - } - while ( r3 == i || r3 == r2 || r3 == r1 ); - - - for ( size_t k = 0; k < dimension; ++k ) - { - // See equation (4) of the reference: - auto guaranteed_changed_idx = tlg() % dimension; - if ( unif01( tlg ) < de_params.crossover_probability || - k == guaranteed_changed_idx ) - { - auto tmp = - population[r1][k] + de_params.mutation_factor * - ( population[r2][k] - population[r3][k] ); - auto const &lb = de_params.lower_bounds[k]; - auto const &ub = de_params.upper_bounds[k]; - // Some others recommend regenerating the indices rather than clamping; - // I dunno seems like it could get stuck regenerating . . . 
- trial_vectors[i][k] = std::clamp( tmp, lb, ub ); - } - else - { - trial_vectors[i][k] = population[i][k]; - } - } - - auto const trial_cost = cost_function( trial_vectors[i] ); - if ( FloatUtils::isNan( trial_cost ) ) - { - continue; - } - if ( queries ) - { - std::scoped_lock lock( mt ); - queries->push_back( std::make_pair( trial_vectors[i], trial_cost ) ); - } - - if ( trial_cost < cost[i] || FloatUtils::isNan( cost[i] ) ) - { - cost[i] = trial_cost; - if ( !FloatUtils::isNan( target_value ) && cost[i] <= target_value ) - { - target_attained = true; - } - - if ( current_minimum_cost && cost[i] < *current_minimum_cost ) - { - *current_minimum_cost = cost[i]; - } - - // Can't do this! It's a race condition! - // population[i] = trial_vectors[i]; - // Instead mark all the indices that need to be updated: - updated_indices[i] = 1; - } - } - } ); - } - for ( auto &thread : thread_pool ) - { - thread.join(); - } - - for ( size_t i = 0; i < NP; ++i ) - { - if ( updated_indices[i] ) - { - population[i] = trial_vectors[i]; - updated_indices[i] = 0; - } - } - } - - auto it = std::min_element( cost.begin(), cost.end() ); - return population[std::distance( cost.begin(), it )]; -} - -} // namespace NLR -#endif From 5a3a72a8a34902fc74b1c8b6c78c36fc2a7092de Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Mon, 3 Feb 2025 13:25:53 +0200 Subject: [PATCH 49/72] Delete src/nlr/GradientDescent.h --- src/nlr/GradientDescent.h | 353 -------------------------------------- 1 file changed, 353 deletions(-) delete mode 100644 src/nlr/GradientDescent.h diff --git a/src/nlr/GradientDescent.h b/src/nlr/GradientDescent.h deleted file mode 100644 index f582711397..0000000000 --- a/src/nlr/GradientDescent.h +++ /dev/null @@ -1,353 +0,0 @@ -/* - * Copyright Nick Thompson, 2024 - * Use, modification and distribution are subject to the - * Boost Software License, Version 1.0. 
(See accompanying file - * LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) - */ -#ifndef GRADIENT_DESCENT -#define GRADIENT_DESCENT - -#include "FloatUtils.h" -#include "GlobalConfiguration.h" - -#include // for std::sort -#include -#include -#include -#include -#include -#include -#include -#include // for std::false_type -#include -#include - -namespace NLR { - -template struct has_resize : std::false_type -{ -}; - -template -struct has_resize().resize( size_t{} ) )>> : std::true_type -{ -}; - -template constexpr bool has_resize_v = has_resize::value; - -template -void validate_bounds( ArgumentContainer const &lower_bounds, ArgumentContainer const &upper_bounds ) -{ - std::ostringstream oss; - - if ( lower_bounds.size() == 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The dimension of the problem cannot be zero."; - throw std::domain_error( oss.str() ); - } - - if ( upper_bounds.size() != lower_bounds.size() ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be the same number of lower bounds as upper bounds, but given "; - oss << upper_bounds.size() << " upper bounds, and " << lower_bounds.size() - << " lower bounds."; - throw std::domain_error( oss.str() ); - } - - for ( size_t i = 0; i < lower_bounds.size(); ++i ) - { - auto lb = lower_bounds[i]; - auto ub = upper_bounds[i]; - if ( lb > ub ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The upper bound must be greater than or equal to the lower bound, but the " - "upper bound is " - << ub << " and the lower is " << lb << "."; - throw std::domain_error( oss.str() ); - } - - if ( !FloatUtils::isFinite( lb ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The lower bound must be finite, but got " << lb << "."; - oss << " For infinite bounds, emulate with std::numeric_limits::lower() or use a " - "standard infinite->finite " - "transform."; - throw std::domain_error( oss.str() ); 
- } - - if ( !FloatUtils::isFinite( ub ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The upper bound must be finite, but got " << ub << "."; - oss << " For infinite bounds, emulate with std::numeric_limits::max() or use a " - "standard infinite->finite " - "transform."; - throw std::domain_error( oss.str() ); - } - } -} - -template -std::vector random_initial_population( ArgumentContainer const &lower_bounds, - ArgumentContainer const &upper_bounds, - size_t initial_population_size, - URBG &&gen ) -{ - using Real = typename ArgumentContainer::value_type; - using DimensionlessReal = decltype( Real() / Real() ); - constexpr bool has_resize = has_resize_v; - std::vector population( initial_population_size ); - auto const dimension = lower_bounds.size(); - - for ( size_t i = 0; i < population.size(); ++i ) - { - if constexpr ( has_resize ) - population[i].resize( dimension ); - else - { - // Argument type must be known at compile-time; like std::array: - if ( population[i].size() != dimension ) - { - std::ostringstream oss; - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": For containers which do not have resize, the default size must be the " - "same as the dimension, "; - oss << "but the default container size is " << population[i].size() - << " and the dimension of the problem is " << dimension << "."; - oss << " The function argument container type is " - << typeid( ArgumentContainer ).name() << ".\n"; - throw std::runtime_error( oss.str() ); - } - } - } - - // Why don't we provide an option to initialize with (say) a Gaussian distribution? - // > If the optimum's location is fairly well known, - // > a Gaussian distribution may prove somewhat faster, although it - // > may also increase the probability that the population will converge prematurely. - // > In general, uniform distributions are preferred, since they best reflect - // > the lack of knowledge about the optimum's location. 
- // - Differential Evolution: A Practical Approach to Global Optimization - // That said, scipy uses Latin Hypercube sampling and says self-avoiding sequences are - // preferable. So this is something that could be investigated and potentially improved. - std::uniform_real_distribution dis( DimensionlessReal( 0 ), - DimensionlessReal( 1 ) ); - for ( size_t i = 0; i < population.size(); ++i ) - { - for ( size_t j = 0; j < dimension; ++j ) - { - auto const &lb = lower_bounds[j]; - auto const &ub = upper_bounds[j]; - population[i][j] = lb + dis( gen ) * ( ub - lb ); - } - } - - return population; -} - -template -void validate_initial_guess( ArgumentContainer const &initial_guess, - ArgumentContainer const &lower_bounds, - ArgumentContainer const &upper_bounds ) -{ - std::ostringstream oss; - auto const dimension = lower_bounds.size(); - - if ( initial_guess.size() != dimension ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": The initial guess must have the same dimensions as the problem,"; - oss << ", but the problem size is " << dimension << " and the initial guess has " - << initial_guess.size() << " elements."; - throw std::domain_error( oss.str() ); - } - - for ( size_t i = 0; i < dimension; ++i ) - { - auto lb = lower_bounds[i]; - auto ub = upper_bounds[i]; - - if ( !FloatUtils::isFinite( initial_guess[i] ) ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": At index " << i << ", the initial guess is " << initial_guess[i] - << ", make sure all elements of the initial guess are finite."; - throw std::domain_error( oss.str() ); - } - - if ( initial_guess[i] < lb || initial_guess[i] > ub ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": At index " << i << " the initial guess " << initial_guess[i] - << " is not in the bounds [" << lb << ", " << ub << "]."; - throw std::domain_error( oss.str() ); - } - } -} - -// Single-Threaded Gradient Descent - -// We provide the parameters in a struct-there 
are too many of them and they are too unwieldy to -// pass individually: -template struct gradient_descent_parameters -{ - using Real = typename ArgumentContainer::value_type; - ArgumentContainer lower_bounds; - ArgumentContainer upper_bounds; - - Real step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; - size_t max_iterations = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; - Real epsilon = GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS; - bool maximize = false; - Real weight_decay = 0; - Real lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; - ArgumentContainer const *initial_guess = nullptr; -}; - -template -void validate_gradient_descent_parameters( - gradient_descent_parameters const &cd_params ) -{ - std::ostringstream oss; - validate_bounds( cd_params.lower_bounds, cd_params.upper_bounds ); - - if ( FloatUtils::isNan( cd_params.step_size ) || cd_params.step_size <= 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": step_size > 0 is required, but got step_size=" << cd_params.step_size << "."; - throw std::domain_error( oss.str() ); - } - - if ( FloatUtils::isNan( cd_params.epsilon ) || cd_params.epsilon <= 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": epsilon > 0 is required, but got epsilon=" << cd_params.epsilon << "."; - throw std::domain_error( oss.str() ); - } - - if ( cd_params.max_iterations < 1 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": There must be at least one generation."; - throw std::invalid_argument( oss.str() ); - } - - if ( FloatUtils::isNan( cd_params.lr ) || cd_params.lr <= 0 ) - { - oss << __FILE__ << ":" << __LINE__ << ":" << __func__; - oss << ": lr > 0 is required, but got lr=" << cd_params.lr << "."; - throw std::domain_error( oss.str() ); - } - - if ( FloatUtils::isNan( cd_params.weight_decay ) || cd_params.weight_decay < 0 ) - { - oss << __FILE__ << ":" << 
__LINE__ << ":" << __func__; - oss << ": weight_decay >= 0 is required, but got weight_decay=" << cd_params.weight_decay - << "."; - throw std::domain_error( oss.str() ); - } - - if ( cd_params.initial_guess ) - { - validate_initial_guess( - *cd_params.initial_guess, cd_params.lower_bounds, cd_params.upper_bounds ); - } -} - -template -ArgumentContainer gradient_descent( - const Func cost_function, - gradient_descent_parameters const &cd_params, - URBG &gen, - std::invoke_result_t target_value, - bool *cancellation = nullptr, - std::vector>> - *queries = nullptr, - std::invoke_result_t *current_minimum_cost = nullptr ) -{ - using ResultType = std::invoke_result_t; - - validate_gradient_descent_parameters( cd_params ); - const size_t dimension = cd_params.lower_bounds.size(); - auto step_size = cd_params.step_size; - auto max_iterations = cd_params.max_iterations; - auto maximize = cd_params.maximize; - auto epsilon = cd_params.epsilon; - auto lr = cd_params.lr; - auto weight_decay = cd_params.weight_decay; - auto lower_bounds = cd_params.lower_bounds; - auto upper_bounds = cd_params.upper_bounds; - auto guess = - random_initial_population( cd_params.lower_bounds, cd_params.upper_bounds, 1, gen ); - - if ( cd_params.initial_guess ) - { - guess[0] = *cd_params.initial_guess; - } - - std::vector candidates( dimension ); - std::vector gradient( dimension ); - ArgumentContainer current_minimum = guess[0]; - - for ( size_t i = 0; i < max_iterations; ++i ) - { - double current_cost = cost_function( guess[0] ); - for ( size_t j = 0; j < dimension; ++j ) - { - candidates[j] = guess[0]; - candidates[j][j] += step_size; - - if ( candidates[j][j] > upper_bounds[j] || candidates[j][j] < lower_bounds[j] ) - { - gradient[j] = 0; - continue; - } - - size_t sign = ( maximize == false ? 
1 : -1 ); - double cost = cost_function( candidates[j] ); - gradient[j] = sign * ( cost - current_cost ) / step_size + weight_decay * guess[0][j]; - if ( !FloatUtils::isNan( target_value ) && cost <= target_value ) - { - guess[0] = candidates[j]; - break; - } - } - - bool gradient_is_zero = true; - for ( size_t j = 0; j < dimension; ++j ) - { - if ( FloatUtils::abs( gradient[j] ) > epsilon ) - { - gradient_is_zero = false; - } - } - if ( gradient_is_zero ) - { - break; - } - - for ( size_t j = 0; j < dimension; ++j ) - { - guess[0][j] -= lr * gradient[j]; - - if ( guess[0][j] > upper_bounds[j] ) - { - guess[0][j] = upper_bounds[j]; - } - - if ( guess[0][j] < lower_bounds[j] ) - { - guess[0][j] = lower_bounds[j]; - } - } - } - - return guess[0]; -} - -} // namespace NLR -#endif From b093daca8b7b74135e77c683a1afc253ca04918c Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Mon, 3 Feb 2025 13:27:01 +0200 Subject: [PATCH 50/72] Delete src/nlr/MILPSolverBoundTighteningType.h --- src/nlr/MILPSolverBoundTighteningType.h | 46 ------------------------- 1 file changed, 46 deletions(-) delete mode 100644 src/nlr/MILPSolverBoundTighteningType.h diff --git a/src/nlr/MILPSolverBoundTighteningType.h b/src/nlr/MILPSolverBoundTighteningType.h deleted file mode 100644 index 760f82e8d1..0000000000 --- a/src/nlr/MILPSolverBoundTighteningType.h +++ /dev/null @@ -1,46 +0,0 @@ -/********************* */ -/*! \file MILPSolverBoundTighteningType.h -** \verbatim -** Top contributors (to current version): -** Guy Katz -** This file is part of the Marabou project. -** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS -** in the top-level source directory) and their institutional affiliations. -** All rights reserved. 
See the file COPYING in the top-level source -** directory for licensing information.\endverbatim -** -** [[ Add lengthier description here ]] - -**/ - -#ifndef __MILPSolverBoundTighteningType_h__ -#define __MILPSolverBoundTighteningType_h__ - -/* - MILP solver bound tightening options -*/ -enum class MILPSolverBoundTighteningType { - // Only encode pure linear constraints in the underlying - // solver, in a way that over-approximates the query - LP_RELAXATION = 0, - LP_RELAXATION_INCREMENTAL = 1, - // Encode linear and integer constraints in the underlying - // solver, in a way that completely captures the query but is - // more expensive to solve - MILP_ENCODING = 2, - MILP_ENCODING_INCREMENTAL = 3, - // Encode full queries and tries to fix relus until fix point - ITERATIVE_PROPAGATION = 4, - // Perform backward analysis - BACKWARD_ANALYSIS_ONCE = 5, - BACKWARD_ANALYSIS_CONVERGE = 6, - // Perform backward analysis using the INVPROP Algorithm (arXiv:2302.01404v4 [cs.LG]) - BACKWARD_ANALYSIS_INVPROP = 7, - // Perform backward analysis using the PreimageApproximation Algorithm (arXiv:2305.03686v4 - // [cs.SE]) - BACKWARD_ANALYSIS_PREIMAGE_APPROX = 8, - // Option to have no MILP bound tightening performed - NONE = 10, -}; - -#endif // __MILPSolverBoundTighteningType_h__ From bf8513bdeec1682f7f30f4a451e34aa17907b66d Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Mon, 3 Feb 2025 13:31:02 +0200 Subject: [PATCH 51/72] Add files via upload --- src/nlr/LPFormulator.cpp | 180 +--------------------- src/nlr/LPFormulator.h | 19 +-- src/nlr/Layer.cpp | 256 +++++++++++++++++++++++++++++-- src/nlr/Layer.h | 21 ++- src/nlr/NetworkLevelReasoner.cpp | 13 +- 5 files changed, 268 insertions(+), 221 deletions(-) diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp index c9ff9eb6d6..4f5749c9e7 100644 --- a/src/nlr/LPFormulator.cpp +++ b/src/nlr/LPFormulator.cpp @@ -337,120 +337,13 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( const Map throw 
InfeasibleQueryException(); } -void LPFormulator::optimizeBoundsWithInvprop( const Map &layers ) +void LPFormulator::optimizeBoundsWithPreimageApproximation( Map &layers ) { -} - -void LPFormulator::optimizeBoundsWithPreimageApproximation( const Map &layers ) -{ - // Define EstimateVolume bind which captures layers and only receives coeffs as its input. - auto EstimateVolumeBind = std::bind( EstimateVolume, layers, std::placeholders::_1 ); - - // Search over coeffs in [0, 1]^number_of_parameters. Enforce single-threaded optimization. - auto opt_params = gradient_descent_parameters>(); - unsigned num = getNumberOfParameters( layers ); - opt_params.lower_bounds.resize( num, 0 ); - opt_params.upper_bounds.resize( num, 1 ); - double value_to_reach = 0; - std::mt19937_64 rng( GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED ); - - // Find optimal coeffs and run final parameterised symbolic bound tightening + backward LP - // relaxation. - auto optimal_coeffs = gradient_descent( EstimateVolumeBind, opt_params, rng, value_to_reach ); - - // First, run parameterised symbolic bound propagation. - unsigned i = 0; - Map> layerIndicesToParameters = - getParametersForLayers( layers, optimal_coeffs ); - for ( auto pair : layers ) - { - unsigned layerIndex = pair.first; - Layer *layer = layers[layerIndex]; - std::vector currentLayerCoeffs = layerIndicesToParameters[layerIndex]; - if ( i == layers.size() - 1 ) - { - layer->computeParameterisedSymbolicBounds( currentLayerCoeffs, true, false ); - } - else - { - layer->computeParameterisedSymbolicBounds( currentLayerCoeffs ); - } - i++; - } - - // Finally, run parameterised forward LP and parameterised backward LP. 
+ std::vector optimal_coeffs = layers[0]->OptimalParameterisedSymbolicBoundTightening(); optimizeBoundsWithLpRelaxation( layers, false, optimal_coeffs ); optimizeBoundsWithLpRelaxation( layers, true, optimal_coeffs ); } -double LPFormulator::EstimateVolume( const Map &layers, - std::vector coeffs ) -{ - // First, run parameterised symbolic bound propagation. - Map> layerIndicesToParameters = - getParametersForLayers( layers, coeffs ); - for ( auto pair : layers ) - { - unsigned layerIndex = pair.first; - Layer *layer = layers[layerIndex]; - std::vector currentLayerCoeffs = layerIndicesToParameters[layerIndex]; - layer->computeParameterisedSymbolicBounds( currentLayerCoeffs ); - } - - std::mt19937_64 rng( GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED ); - double log_box_volume = 0; - double sigmoid_sum = 0; - - unsigned inputLayerIndex = 0; - unsigned outputLayerIndex = layers.size() - 1; - Layer *inputLayer = layers[inputLayerIndex]; - Layer *outputLayer = layers[outputLayerIndex]; - - // Calculate volume of input variables' bounding box. - for ( unsigned index = 0; index < inputLayer->getSize(); ++index ) - { - if ( inputLayer->neuronEliminated( index ) ) - continue; - - double lb = inputLayer->getLb( index ); - double ub = inputLayer->getUb( index ); - - log_box_volume += std::log( ub - lb ); - } - - for ( unsigned i = 0; i < GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; ++i ) - { - // Sample input point from known bounds. - Map point; - for ( unsigned index = 0; index < inputLayer->getSize(); ++index ) - { - if ( inputLayer->neuronEliminated( index ) ) - continue; - - NeuronIndex neuron( inputLayerIndex, index ); - double lb = inputLayer->getLb( index ); - double ub = inputLayer->getUb( index ); - std::uniform_real_distribution<> dis( lb, ub ); - point.insert( neuron, dis( rng ) ); - } - - // Calculate sigmoid of maximum margin from output symbolic bounds. 
- double max_margin = 0; - for ( unsigned index = 0; index < outputLayer->getSize(); ++index ) - { - if ( outputLayer->neuronEliminated( index ) ) - continue; - - double margin = outputLayer->calculateDifferenceFromSymbolic( point, index ); - max_margin = std::max( max_margin, margin ); - } - sigmoid_sum += SigmoidConstraint::sigmoid( max_margin ); - } - - return std::exp( log_box_volume + std::log( sigmoid_sum ) ) / - GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; -} - void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map &layers, unsigned targetIndex ) { @@ -777,7 +670,7 @@ void LPFormulator::createLPRelaxation( const Map &layers, else { Map> layerIndicesToParameters = - getParametersForLayers( layers, coeffs ); + layer.second->getParametersForLayers( layers, coeffs ); std::vector currentLayerCoeffs = layerIndicesToParameters[currentLayerIndex]; addLayerToParameterisedModel( gurobi, layer.second, false, currentLayerCoeffs ); } @@ -810,7 +703,7 @@ void LPFormulator::createLPRelaxationAfter( const Map &layers else { Map> layerIndicesToParameters = - getParametersForLayers( layers, coeffs ); + currentLayer->getParametersForLayers( layers, coeffs ); std::vector currentLayerCoeffs = layerIndicesToParameters[currentLayerIndex]; addLayerToParameterisedModel( gurobi, currentLayer, true, currentLayerCoeffs ); @@ -1827,71 +1720,6 @@ void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi, } } -unsigned LPFormulator::getNumberOfParameters( const Map &layers ) -{ - unsigned index = 0; - for ( auto pair : layers ) - { - unsigned layerIndex = pair.first; - Layer *layer = layers[layerIndex]; - switch ( layer->getLayerType() ) - { - case Layer::RELU: - case Layer::LEAKY_RELU: - index++; - break; - - case Layer::SIGN: - case Layer::BILINEAR: - index += 2; - break; - - default: - break; - } - } - return index; -} - -Map> -LPFormulator::getParametersForLayers( const Map &layers, - std::vector coeffs ) -{ - unsigned index = 0; - Map> 
layerIndicesToParameters; - for ( auto pair : layers ) - { - unsigned layerIndex = pair.first; - Layer *layer = layers[layerIndex]; - std::vector parameters = {}; - switch ( layer->getLayerType() ) - { - case Layer::RELU: - case Layer::LEAKY_RELU: - { - parameters = std::vector( coeffs.begin() + index, coeffs.begin() + index + 1 ); - layerIndicesToParameters.insert( layerIndex, parameters ); - index++; - } - break; - - case Layer::SIGN: - case Layer::BILINEAR: - { - parameters = std::vector( coeffs.begin() + index, coeffs.begin() + index + 2 ); - layerIndicesToParameters.insert( layerIndex, parameters ); - index += 2; - } - break; - - default: - layerIndicesToParameters.insert( layerIndex, parameters ); - break; - } - } - return layerIndicesToParameters; -} - void LPFormulator::addLayerToParameterisedModel( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index c9164f0011..d89af4c331 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -16,7 +16,6 @@ #ifndef __LPFormulator_h__ #define __LPFormulator_h__ -#include "GradientDescent.h" #include "GurobiWrapper.h" #include "LayerOwner.h" #include "Map.h" @@ -56,8 +55,7 @@ class LPFormulator : public ParallelSolver void optimizeBoundsWithLpRelaxation( const Map &layers, bool backward = false, std::vector coeffs = {} ); - void optimizeBoundsWithInvprop( const Map &layers ); - void optimizeBoundsWithPreimageApproximation( const Map &layers ); + void optimizeBoundsWithPreimageApproximation( Map &layers ); void optimizeBoundsOfOneLayerWithLpRelaxation( const Map &layers, unsigned targetIndex ); void optimizeBoundsWithIncrementalLpRelaxation( const Map &layers ); @@ -91,16 +89,6 @@ class LPFormulator : public ParallelSolver void addLayerToModel( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ); - - // Get total number of optimizable parameters for parameterised SBT/LP relaxation. 
- unsigned getNumberOfParameters( const Map &layers ); - - - // Get map containing vector of optimizable parameters for parameterised SBT/LP relaxation for - // every layer index. - static Map> - getParametersForLayers( const Map &layers, std::vector coeffs ); - private: LayerOwner *_layerOwner; bool _cutoffInUse; @@ -176,11 +164,6 @@ class LPFormulator : public ParallelSolver std::vector coeffs ); - // Estimate Volume of parameterised LP relaxations. - static double EstimateVolume( const Map &layers, - std::vector coeffs ); - - /* Optimize for the min/max value of variableName with respect to the constraints encoded in gurobi. If the query is infeasible, *infeasible is set to true. diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index 0ebb7872df..e796eda2d0 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -75,7 +75,9 @@ void Layer::allocateMemory() _inputLayerSize = ( _type == INPUT ) ? _size : _layerOwner->getLayer( 0 )->getSize(); if ( Options::get()->getSymbolicBoundTighteningType() == - SymbolicBoundTighteningType::SYMBOLIC_BOUND_TIGHTENING ) + SymbolicBoundTighteningType::SYMBOLIC_BOUND_TIGHTENING || + Options::get()->getMILPSolverBoundTighteningType() == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX ) { _symbolicLb = new double[_size * _inputLayerSize]; _symbolicUb = new double[_size * _inputLayerSize]; @@ -3392,21 +3394,6 @@ void Layer::computeSymbolicBoundsForWeightedSum() } } -double Layer::calculateDifferenceFromSymbolic( Map &point, unsigned i ) const -{ - double lowerSum = _symbolicLowerBias[i]; - double upperSum = _symbolicUpperBias[i]; - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - NeuronIndex neuron( 0, j ); - lowerSum += _symbolicLb[j * _size + i] * point[neuron]; - upperSum += _symbolicUb[j * _size + i] * point[neuron]; - } - - return std::max( _ub[i] - upperSum, lowerSum - _lb[i] ); -} - void Layer::computeParameterisedSymbolicBounds( std::vector coeffs, bool receivePolygonal, bool receive ) @@ -4335,6 
+4322,243 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( std::vector c } } +std::vector Layer::OptimalParameterisedSymbolicBoundTightening() +{ + const Map &layers = _layerOwner->getLayerIndexToLayer(); + + // Search over coeffs in [0, 1]^number_of_parameters with projected grdient descent. + unsigned max_iterations = + GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS; + double step_size = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE; + double epsilon = GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS; + double weight_decay = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_WEIGHT_DECAY; + double lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; + bool maximize = false; + + unsigned dimension = getNumberOfParameters( layers ); + std::vector lower_bounds; + std::vector upper_bounds; + std::vector guess; + lower_bounds.resize( dimension, 0 ); + upper_bounds.resize( dimension, 1 ); + guess.resize( dimension, 0 ); + + // Initialize initial guess uniformly. + std::mt19937_64 rng( GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED ); + std::uniform_real_distribution dis( 0, 1 ); + for ( size_t j = 0; j < dimension; ++j ) + { + double lb = lower_bounds[j]; + double ub = upper_bounds[j]; + guess[j] = lb + dis( rng ) * ( ub - lb ); + } + + std::vector> candidates( dimension ); + std::vector gradient( dimension ); + std::vector current_minimum = guess; + + for ( size_t i = 0; i < max_iterations; ++i ) + { + double current_cost = EstimateVolume( guess ); + for ( size_t j = 0; j < dimension; ++j ) + { + candidates[j] = guess; + candidates[j][j] += step_size; + + if ( candidates[j][j] > upper_bounds[j] || candidates[j][j] < lower_bounds[j] ) + { + gradient[j] = 0; + continue; + } + + size_t sign = ( maximize == false ? 
1 : -1 ); + double cost = EstimateVolume( candidates[j] ); + gradient[j] = sign * ( cost - current_cost ) / step_size + weight_decay * guess[j]; + } + + bool gradient_is_zero = true; + for ( size_t j = 0; j < dimension; ++j ) + { + if ( FloatUtils::abs( gradient[j] ) > epsilon ) + { + gradient_is_zero = false; + } + } + if ( gradient_is_zero ) + { + break; + } + + for ( size_t j = 0; j < dimension; ++j ) + { + guess[j] -= lr * gradient[j]; + + if ( guess[j] > upper_bounds[j] ) + { + guess[j] = upper_bounds[j]; + } + + if ( guess[j] < lower_bounds[j] ) + { + guess[j] = lower_bounds[j]; + } + } + } + + return guess; +} + +double Layer::EstimateVolume( std::vector coeffs ) +{ + // First, run parameterised symbolic bound propagation. + const Map &layers = _layerOwner->getLayerIndexToLayer(); + Map> layerIndicesToParameters = + getParametersForLayers( layers, coeffs ); + for ( unsigned i = 0; i < layers.size(); ++i ) + { + ASSERT( layers.exists( i ) ); + std::vector currentLayerCoeffs = layerIndicesToParameters[i]; + layers[i]->computeParameterisedSymbolicBounds( currentLayerCoeffs ); + } + + std::mt19937_64 rng( GlobalConfiguration::VOLUME_ESTIMATION_RANDOM_SEED ); + double log_box_volume = 0; + double sigmoid_sum = 0; + + unsigned inputLayerIndex = 0; + unsigned outputLayerIndex = layers.size() - 1; + Layer *inputLayer = layers[inputLayerIndex]; + Layer *outputLayer = layers[outputLayerIndex]; + + // Calculate volume of input variables' bounding box. + for ( unsigned index = 0; index < inputLayer->getSize(); ++index ) + { + if ( inputLayer->neuronEliminated( index ) ) + continue; + + double lb = inputLayer->getLb( index ); + double ub = inputLayer->getUb( index ); + + if ( lb == ub ) + return 0; + + log_box_volume += std::log( ub - lb ); + } + + for ( unsigned i = 0; i < GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; ++i ) + { + // Sample input point from known bounds. 
+ Map point; + for ( unsigned j = 0; j < inputLayer->getSize(); ++j ) + { + if ( inputLayer->neuronEliminated( j ) ) + { + point.insert( j, 0 ); + } + else + { + double lb = inputLayer->getLb( j ); + double ub = inputLayer->getUb( j ); + std::uniform_real_distribution<> dis( lb, ub ); + point.insert( j, dis( rng ) ); + } + } + + // Calculate sigmoid of maximum margin from output symbolic bounds. + double max_margin = 0; + for ( unsigned j = 0; j < outputLayer->getSize(); ++j ) + { + if ( outputLayer->neuronEliminated( j ) ) + continue; + + double margin = outputLayer->calculateDifferenceFromSymbolic( point, j ); + max_margin = std::max( max_margin, margin ); + } + sigmoid_sum += SigmoidConstraint::sigmoid( max_margin ); + } + + return std::exp( log_box_volume + std::log( sigmoid_sum ) ) / + GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS; +} + +double Layer::calculateDifferenceFromSymbolic( Map &point, unsigned i ) const +{ + double lowerSum = _symbolicLowerBias[i]; + double upperSum = _symbolicUpperBias[i]; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + lowerSum += _symbolicLb[j * _size + i] * point[j]; + upperSum += _symbolicUb[j * _size + i] * point[j]; + } + + return std::max( _ub[i] - upperSum, lowerSum - _lb[i] ); +} + +unsigned Layer::getNumberOfParameters( const Map &layers ) +{ + unsigned index = 0; + for ( auto pair : layers ) + { + unsigned layerIndex = pair.first; + Layer *layer = layers[layerIndex]; + switch ( layer->getLayerType() ) + { + case Layer::RELU: + case Layer::LEAKY_RELU: + index++; + break; + + case Layer::SIGN: + case Layer::BILINEAR: + index += 2; + break; + + default: + break; + } + } + return index; +} + +Map> +Layer::getParametersForLayers( const Map &layers, std::vector coeffs ) +{ + unsigned index = 0; + Map> layerIndicesToParameters; + for ( auto pair : layers ) + { + unsigned layerIndex = pair.first; + Layer *layer = layers[layerIndex]; + std::vector parameters = {}; + switch ( layer->getLayerType() ) + { + case 
Layer::RELU: + case Layer::LEAKY_RELU: + { + parameters = std::vector( coeffs.begin() + index, coeffs.begin() + index + 1 ); + layerIndicesToParameters.insert( layerIndex, parameters ); + index++; + } + break; + + case Layer::SIGN: + case Layer::BILINEAR: + { + parameters = std::vector( coeffs.begin() + index, coeffs.begin() + index + 2 ); + layerIndicesToParameters.insert( layerIndex, parameters ); + index += 2; + } + break; + + default: + layerIndicesToParameters.insert( layerIndex, parameters ); + break; + } + } + return layerIndicesToParameters; +} + double Layer::softmaxLSELowerBound( const Vector &inputs, const Vector &inputLbs, const Vector &inputUbs, diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h index fef75a1f8a..3b94bec089 100644 --- a/src/nlr/Layer.h +++ b/src/nlr/Layer.h @@ -136,12 +136,22 @@ class Layer void obtainCurrentBounds( const Query &inputQuery ); void obtainCurrentBounds(); + void computeIntervalArithmeticBounds(); void computeSymbolicBounds(); void computeParameterisedSymbolicBounds( std::vector coeffs, bool receivePolygonal = false, bool receive = false ); - double calculateDifferenceFromSymbolic( Map &point, unsigned i ) const; - void computeIntervalArithmeticBounds(); + + // Get total number of optimizable parameters for parameterised SBT relaxation. + unsigned getNumberOfParameters( const Map &layers ); + + // Get map containing vector of optimizable parameters for parameterised SBT relaxation for + // every layer index. + Map> getParametersForLayers( const Map &layers, + std::vector coeffs ); + + // Return optimizable parameters which minimize parameterised SBT bounds' volume. 
+ std::vector OptimalParameterisedSymbolicBoundTightening(); /* Preprocessing functionality: variable elimination and reindexing @@ -305,6 +315,13 @@ class Layer void computeParameterisedSymbolicBoundsForLeakyRelu( std::vector coeffs, bool receive ); void computeParameterisedSymbolicBoundsForBilinear( std::vector coeffs, bool receive ); + // Estimate Volume of parameterised symbolic bound tightening. + double EstimateVolume( std::vector coeffs ); + + // Return difference between given point and upper and lower bounds determined by parameterised + // SBT relaxation. + double calculateDifferenceFromSymbolic( Map &point, unsigned i ) const; + /* Helper functions for interval bound tightening */ diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index 8b82ae140b..4cf1371b98 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -224,13 +224,11 @@ void NetworkLevelReasoner::symbolicBoundPropagation() void NetworkLevelReasoner::parameterisedSymbolicBoundPropagation( std::vector coeffs ) { Map> layerIndicesToParameters = - LPFormulator::getParametersForLayers( _layerIndexToLayer, coeffs ); - for ( auto pair : _layerIndexToLayer ) + _layerIndexToLayer[0]->getParametersForLayers( _layerIndexToLayer, coeffs ); + for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) { - unsigned layerIndex = pair.first; - Layer *layer = _layerIndexToLayer[layerIndex]; - std::vector currentLayerCoeffs = layerIndicesToParameters[layerIndex]; - layer->computeParameterisedSymbolicBounds( currentLayerCoeffs ); + std::vector currentLayerCoeffs = layerIndicesToParameters[i]; + _layerIndexToLayer[i]->computeParameterisedSymbolicBounds( currentLayerCoeffs ); } } @@ -251,9 +249,6 @@ void NetworkLevelReasoner::lpRelaxationPropagation() Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE ) lpFormulator.optimizeBoundsWithLpRelaxation( _layerIndexToLayer, true ); - else if ( 
Options::get()->getMILPSolverBoundTighteningType() == - MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP ) - lpFormulator.optimizeBoundsWithInvprop( _layerIndexToLayer ); else if ( Options::get()->getMILPSolverBoundTighteningType() == MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX ) lpFormulator.optimizeBoundsWithPreimageApproximation( _layerIndexToLayer ); From 3b2fe08c9c66db77d3a7abefe1d30f33acddeeb4 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Mon, 3 Feb 2025 13:33:31 +0200 Subject: [PATCH 52/72] Add files via upload --- src/nlr/tests/Test_NetworkLevelReasoner.h | 1 - 1 file changed, 1 deletion(-) diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index b23deca8cf..1e9fc8eb92 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -13454,7 +13454,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite void updateTableau( MockTableau &tableau, List &tightenings ) { - ASSERT( tableau ); for ( const auto &tightening : tightenings ) { if ( tightening._type == Tightening::LB ) From 20c35774d7e31898e06c6e652a531e59c0ff54c1 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Mon, 3 Feb 2025 13:36:04 +0200 Subject: [PATCH 53/72] Add files via upload --- src/engine/Engine.cpp | 13 ++++++++++--- src/engine/MILPSolverBoundTighteningType.h | 4 +--- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/src/engine/Engine.cpp b/src/engine/Engine.cpp index e137c3cbb9..332e95f70e 100644 --- a/src/engine/Engine.cpp +++ b/src/engine/Engine.cpp @@ -1594,7 +1594,6 @@ void Engine::performMILPSolverBoundedTightening( Query *inputQuery ) case MILPSolverBoundTighteningType::LP_RELAXATION_INCREMENTAL: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP: case 
MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX: _networkLevelReasoner->lpRelaxationPropagation(); break; @@ -1660,6 +1659,15 @@ void Engine::performAdditionalBackwardAnalysisIfNeeded() printf( "Backward analysis tightened %u bounds\n", tightened ); } } + + if ( _milpSolverBoundTighteningType == + MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX ) + { + performMILPSolverBoundedTightening( &( *_preprocessedQuery ) ); + unsigned tightened = performSymbolicBoundTightening( &( *_preprocessedQuery ) ); + if ( _verbosity > 0 ) + printf( "Backward analysis tightened %u bounds\n", tightened ); + } } void Engine::performMILPSolverBoundedTighteningForSingleLayer( unsigned targetIndex ) @@ -1687,7 +1695,6 @@ void Engine::performMILPSolverBoundedTighteningForSingleLayer( unsigned targetIn return; case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE: - case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP: case MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX: case MILPSolverBoundTighteningType::ITERATIVE_PROPAGATION: case MILPSolverBoundTighteningType::NONE: @@ -3810,4 +3817,4 @@ void Engine::addPLCLemma( std::shared_ptr &explanation ) ASSERT( explanation && _UNSATCertificate && _UNSATCertificateCurrentPointer ) _statistics.incUnsignedAttribute( Statistics::NUM_LEMMAS ); _UNSATCertificateCurrentPointer->get()->addPLCLemma( explanation ); -} \ No newline at end of file +} diff --git a/src/engine/MILPSolverBoundTighteningType.h b/src/engine/MILPSolverBoundTighteningType.h index 760f82e8d1..517acf73e9 100644 --- a/src/engine/MILPSolverBoundTighteningType.h +++ b/src/engine/MILPSolverBoundTighteningType.h @@ -34,11 +34,9 @@ enum class MILPSolverBoundTighteningType { // Perform backward analysis BACKWARD_ANALYSIS_ONCE = 5, BACKWARD_ANALYSIS_CONVERGE = 6, - // Perform backward analysis using the INVPROP Algorithm (arXiv:2302.01404v4 [cs.LG]) - 
BACKWARD_ANALYSIS_INVPROP = 7, // Perform backward analysis using the PreimageApproximation Algorithm (arXiv:2305.03686v4 // [cs.SE]) - BACKWARD_ANALYSIS_PREIMAGE_APPROX = 8, + BACKWARD_ANALYSIS_PREIMAGE_APPROX = 7, // Option to have no MILP bound tightening performed NONE = 10, }; From 570eae8348cb947a7a1ccf3d5be6b6b9c2f0b0f2 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Mon, 3 Feb 2025 13:37:48 +0200 Subject: [PATCH 54/72] Add files via upload --- src/configuration/GlobalConfiguration.cpp | 1 + src/configuration/GlobalConfiguration.h | 3 +++ src/configuration/Options.cpp | 2 -- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp index a5d5100011..231c24f827 100644 --- a/src/configuration/GlobalConfiguration.cpp +++ b/src/configuration/GlobalConfiguration.cpp @@ -73,6 +73,7 @@ const unsigned GlobalConfiguration::VOLUME_ESTIMATION_ITERATIONS = 25000; const unsigned GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_MAX_ITERATIONS = 25; const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_STEP_SIZE = 0.025; const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE = 0.25; +const double GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_WEIGHT_DECAY = 0; const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true; diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h index 92dcfdd049..dd724819b3 100644 --- a/src/configuration/GlobalConfiguration.h +++ b/src/configuration/GlobalConfiguration.h @@ -169,6 +169,9 @@ class GlobalConfiguration // Learning rate for PreimageApproximation optimization. static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; + // Weight decay for PreimageApproximation optimization. + static const double PREIMAGE_APPROXIMATION_OPTIMIZATION_WEIGHT_DECAY; + // How often should projected steepest edge reset the reference space? 
static const unsigned PSE_ITERATIONS_BEFORE_RESET; diff --git a/src/configuration/Options.cpp b/src/configuration/Options.cpp index 17d570133f..ea74231bbe 100644 --- a/src/configuration/Options.cpp +++ b/src/configuration/Options.cpp @@ -207,8 +207,6 @@ MILPSolverBoundTighteningType Options::getMILPSolverBoundTighteningType() const return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_ONCE; if ( strategyString == "backward-converge" ) return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_CONVERGE; - if ( strategyString == "backward-invprop" ) - return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_INVPROP; if ( strategyString == "backward-preimage-approx" ) return MILPSolverBoundTighteningType::BACKWARD_ANALYSIS_PREIMAGE_APPROX; else if ( strategyString == "milp" ) From 76961563dc832b9c07f13a7f711d3456d65239b4 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 6 Feb 2025 01:14:30 +0200 Subject: [PATCH 55/72] Update GlobalConfiguration.cpp --- src/configuration/GlobalConfiguration.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp index 231c24f827..532286aa69 100644 --- a/src/configuration/GlobalConfiguration.cpp +++ b/src/configuration/GlobalConfiguration.cpp @@ -88,7 +88,7 @@ const bool GlobalConfiguration::PL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCES const bool GlobalConfiguration::NL_CONSTRAINTS_ADD_AUX_EQUATIONS_AFTER_PREPROCESSING = true; const double GlobalConfiguration::PREPROCESSOR_ALMOST_FIXED_THRESHOLD = 0.00001; -const unsigned GlobalConfiguration::PREPROCESSSING_MAX_TIGHTEING_ROUND = 10; +const unsigned GlobalConfiguration::PREPROCESSSING_MAX_TIGHTEING_ROUND = 1000; const bool GlobalConfiguration::WARM_START = false; From 2c6aba8eb4feccc0ba40e07f16d8eb20475b731f Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 6 Feb 2025 19:28:49 +0200 Subject: [PATCH 56/72] Update CHANGELOG.md --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) 
diff --git a/CHANGELOG.md b/CHANGELOG.md index 8ecc8e54f4..d687bbed57 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,8 @@ - Adding incremental infrastructure which allows pushing and popping constraints to/from the InputQuery. - Dropped support for parsing Tensorflow network format. Newest Marabou version that supports Tensorflow is at commit 190555573e4702. - Fixed bug in the parsing of `transpose` nodes in command line C++ parser. + - Implemented forward-backward abstract interpretation, symbolic bound tightening, interval arithmetic and simulations for all activation functions. + - Implemented backward analysis using preimage-approximation algorithm for `Relu`, `LeakyRelu`, `Sign` and `Bilinear` Layers. ## Version 2.0.0 From 2894e4fb70e7c7c3227fd8831175cadc86eafadd Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 6 Feb 2025 19:33:03 +0200 Subject: [PATCH 57/72] Update OptionParser.cpp --- src/configuration/OptionParser.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/configuration/OptionParser.cpp b/src/configuration/OptionParser.cpp index cd94bbc394..fe50a9680f 100644 --- a/src/configuration/OptionParser.cpp +++ b/src/configuration/OptionParser.cpp @@ -267,7 +267,7 @@ void OptionParser::initialize() &( ( *_stringOptions )[Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE] ) ) ->default_value( ( *_stringOptions )[Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE] ), "The MILP solver bound tightening type: " - "lp/fb-once/fb-converge/lp-inc/milp/milp-inc/iter-prop/none." ) + "lp/backward-once/backward-converge/backward-preimage-approx/lp-inc/milp/milp-inc/iter-prop/none." 
) #endif ; From 5f7a24fc10854e2b3309d67e37f093d1c13550ed Mon Sep 17 00:00:00 2001 From: idan0610 Date: Sat, 15 Feb 2025 09:30:56 +0200 Subject: [PATCH 58/72] smal fix for bug in SumOfInfeasibilitiesManager::proposePhasePatternUpdateRandomly() when the chosen plConstraintToUpdate is fixed --- src/engine/SumOfInfeasibilitiesManager.cpp | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/engine/SumOfInfeasibilitiesManager.cpp b/src/engine/SumOfInfeasibilitiesManager.cpp index 75f372f7f1..38ac64fcf2 100644 --- a/src/engine/SumOfInfeasibilitiesManager.cpp +++ b/src/engine/SumOfInfeasibilitiesManager.cpp @@ -195,8 +195,19 @@ void SumOfInfeasibilitiesManager::proposePhasePatternUpdateRandomly() } ); // First, pick a pl constraint whose cost component we will update. - unsigned index = (unsigned)T::rand() % _plConstraintsInCurrentPhasePattern.size(); - PiecewiseLinearConstraint *plConstraintToUpdate = _plConstraintsInCurrentPhasePattern[index]; + bool fixed = true; + PiecewiseLinearConstraint *plConstraintToUpdate; + while ( fixed ) + { + if ( _plConstraintsInCurrentPhasePattern.empty() ) + return; + + unsigned index = (unsigned)T::rand() % _plConstraintsInCurrentPhasePattern.size(); + plConstraintToUpdate = _plConstraintsInCurrentPhasePattern[index]; + fixed = plConstraintToUpdate->phaseFixed(); + if ( fixed ) + removeCostComponentFromHeuristicCost( plConstraintToUpdate ); + } // Next, pick an alternative phase. PhaseStatus currentPhase = _currentPhasePattern[plConstraintToUpdate]; From 0884a7814228b8b8339e2f5de7193623658c3b87 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 20 Feb 2025 14:10:31 +0200 Subject: [PATCH 59/72] Add files via upload --- CHANGELOG.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 99e50a0297..77be95ad3c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,11 +5,8 @@ - Dropped support for parsing Tensorflow network format. 
Newest Marabou version that supports Tensorflow is at commit 190555573e4702. - Fixed bug in the parsing of `transpose` nodes in command line C++ parser. - Implemented forward-backward abstract interpretation, symbolic bound tightening, interval arithmetic and simulations for all activation functions. -<<<<<<< HEAD - Implemented backward analysis using preimage-approximation algorithm for `Relu`, `LeakyRelu`, `Sign` and `Bilinear` Layers. -======= - Added the BaBSR heuristic as a new branching strategy for ReLU Splitting ->>>>>>> upstream/master ## Version 2.0.0 From 40f2fe281c50a915c7a315aab7bf83b4c07fbcde Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 20 Feb 2025 14:11:26 +0200 Subject: [PATCH 60/72] Add files via upload --- src/configuration/OptionParser.cpp | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/configuration/OptionParser.cpp b/src/configuration/OptionParser.cpp index 1aba3eff32..54bf7b496d 100644 --- a/src/configuration/OptionParser.cpp +++ b/src/configuration/OptionParser.cpp @@ -267,11 +267,8 @@ void OptionParser::initialize() &( ( *_stringOptions )[Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE] ) ) ->default_value( ( *_stringOptions )[Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE] ), "The MILP solver bound tightening type: " -<<<<<<< HEAD - "lp/backward-once/backward-converge/backward-preimage-approx/lp-inc/milp/milp-inc/iter-prop/none." ) -======= - "lp/backward-once/backward-converge/lp-inc/milp/milp-inc/iter-prop/none." ) ->>>>>>> upstream/master + "lp/backward-once/backward-converge/backward-preimage-approx/lp-inc/milp/milp-inc/" + "iter-prop/none." 
) #endif ; From a95e3a137486f4e508746c439be7af9705079312 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 20 Feb 2025 14:12:59 +0200 Subject: [PATCH 61/72] Add files via upload --- src/nlr/CMakeLists.txt | 5 +++++ src/nlr/LPFormulator.cpp | 43 +--------------------------------------- src/nlr/Layer.cpp | 43 ---------------------------------------- 3 files changed, 6 insertions(+), 85 deletions(-) diff --git a/src/nlr/CMakeLists.txt b/src/nlr/CMakeLists.txt index f2c2161e65..e377a638ba 100644 --- a/src/nlr/CMakeLists.txt +++ b/src/nlr/CMakeLists.txt @@ -19,6 +19,11 @@ network_level_reasoner_add_unit_test(NetworkLevelReasoner) network_level_reasoner_add_unit_test(WsLayerElimination) network_level_reasoner_add_unit_test(ParallelSolver) +if (${ENABLE_GUROBI}) + network_level_reasoner_add_unit_test(LPRelaxation) +endif() + if (${BUILD_PYTHON}) target_include_directories(${MARABOU_PY} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}") endif() + diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp index 9aad8404f4..c6a76ece4a 100644 --- a/src/nlr/LPFormulator.cpp +++ b/src/nlr/LPFormulator.cpp @@ -876,10 +876,6 @@ void LPFormulator::addReluLayerToLpRelaxation( GurobiWrapper &gurobi, } } -<<<<<<< HEAD -======= - ->>>>>>> upstream/master void LPFormulator::addRoundLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -944,10 +940,6 @@ void LPFormulator::addRoundLayerToLpRelaxation( GurobiWrapper &gurobi, } } -<<<<<<< HEAD -======= - ->>>>>>> upstream/master void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -1012,17 +1004,9 @@ void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, double ub = std::min( std::max( -sourceLb, sourceUb ), layer->getUb( i ) ); double lb = std::max( 0.0, layer->getLb( i ) ); gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); -<<<<<<< HEAD - /* - The phase of this AbsoluteValue is not yet fixed, 0 <= 
y <= max(-lb, ub). - */ -======= + // The phase of this AbsoluteValue is not yet fixed, 0 <= y <= max(-lb, ub). - /* - The phase of this AbsoluteValue is not yet fixed, 0 <= y <= max(-lb, ub). - */ ->>>>>>> upstream/master // y >= 0 List terms; terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); @@ -1037,10 +1021,6 @@ void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, } } -<<<<<<< HEAD -======= - ->>>>>>> upstream/master void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -1073,10 +1053,6 @@ void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, if ( createVariables && !gurobi.containsVariable( sourceName ) ) gurobi.addVariable( sourceName, sourceLb, sourceUb ); -<<<<<<< HEAD -======= - ->>>>>>> upstream/master double sourceUbSigmoid = SigmoidConstraint::sigmoid( sourceUb ); double sourceLbSigmoid = SigmoidConstraint::sigmoid( sourceLb ); @@ -1150,10 +1126,6 @@ void LPFormulator::addSigmoidLayerToLpRelaxation( GurobiWrapper &gurobi, } } -<<<<<<< HEAD -======= - ->>>>>>> upstream/master void LPFormulator::addSignLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -1318,10 +1290,6 @@ void LPFormulator::addMaxLayerToLpRelaxation( GurobiWrapper &gurobi, } } -<<<<<<< HEAD -======= - ->>>>>>> upstream/master void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) @@ -1384,11 +1352,6 @@ void LPFormulator::addSoftmaxLayerToLpRelaxation( GurobiWrapper &gurobi, double bias; SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType(); - -<<<<<<< HEAD -======= - ->>>>>>> upstream/master List terms; if ( FloatUtils::areEqual( lb, ub ) ) { @@ -1603,10 +1566,6 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, } } -<<<<<<< HEAD -======= - ->>>>>>> upstream/master void LPFormulator::addWeightedSumLayerToLpRelaxation( 
GurobiWrapper &gurobi, const Layer *layer, bool createVariables ) diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index ccd0922748..2341c07596 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -1063,10 +1063,6 @@ void Layer::computeIntervalArithmeticBoundsForSigmoid() } } -<<<<<<< HEAD -======= - ->>>>>>> upstream/master void Layer::computeIntervalArithmeticBoundsForRound() { for ( unsigned i = 0; i < _size; ++i ) @@ -1099,10 +1095,6 @@ void Layer::computeIntervalArithmeticBoundsForRound() } } -<<<<<<< HEAD -======= - ->>>>>>> upstream/master void Layer::computeIntervalArithmeticBoundsForMax() { for ( unsigned i = 0; i < _size; ++i ) @@ -2054,29 +2046,17 @@ void Layer::computeSymbolicBoundsForLeakyRelu() // Symbolic upper bound: x_f <= (x_b - l) * u / ( u - l) // Concrete upper bound: x_f <= ub_b double width = sourceUb - sourceLb; -<<<<<<< HEAD double weight = ( sourceUb - _alpha * sourceLb ) / width; -======= - double coeff = ( sourceUb - _alpha * sourceLb ) / width; ->>>>>>> upstream/master if ( _alpha <= 1 ) { for ( unsigned j = 0; j < _inputLayerSize; ++j ) { -<<<<<<< HEAD _symbolicUb[j * _size + i] *= weight; } // Do the same for the bias, and then adjust _symbolicUpperBias[i] *= weight; -======= - _symbolicUb[j * _size + i] *= coeff; - } - - // Do the same for the bias, and then adjust - _symbolicUpperBias[i] *= coeff; ->>>>>>> upstream/master _symbolicUpperBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; @@ -2111,19 +2091,11 @@ void Layer::computeSymbolicBoundsForLeakyRelu() { for ( unsigned j = 0; j < _inputLayerSize; ++j ) { -<<<<<<< HEAD _symbolicLb[j * _size + i] *= weight; } // Do the same for the bias, and then adjust _symbolicLowerBias[i] *= weight; -======= - _symbolicLb[j * _size + i] *= coeff; - } - - // Do the same for the bias, and then adjust - _symbolicLowerBias[i] *= coeff; ->>>>>>> upstream/master _symbolicLowerBias[i] += ( ( _alpha - 1 ) * sourceUb * sourceLb ) / width; if ( sourceUb > sourceLb ) @@ -2236,10 +2208,6 
@@ void Layer::computeSymbolicBoundsForLeakyRelu() } } -<<<<<<< HEAD -======= - ->>>>>>> upstream/master void Layer::computeSymbolicBoundsForSigmoid() { std::fill_n( _symbolicLb, _size * _inputLayerSize, 0 ); @@ -3156,11 +3124,7 @@ void Layer::computeSymbolicBoundsForBilinear() } else { -<<<<<<< HEAD _symbolicUb[j * _size + i] += -======= - _symbolicLb[j * _size + i] += ->>>>>>> upstream/master sourceUbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA]; } @@ -3430,7 +3394,6 @@ void Layer::computeSymbolicBoundsForWeightedSum() } } -<<<<<<< HEAD void Layer::computeParameterisedSymbolicBounds( std::vector coeffs, bool receivePolygonal, bool receive ) @@ -4596,8 +4559,6 @@ Layer::getParametersForLayers( const Map &layers, std::vector return layerIndicesToParameters; } -======= ->>>>>>> upstream/master double Layer::softmaxLSELowerBound( const Vector &inputs, const Vector &inputLbs, const Vector &inputUbs, @@ -4877,10 +4838,6 @@ double Layer::softmaxdERUpperBound( const Vector &inputMids, double li = outputLb[i]; double ui = outputUb[i]; -<<<<<<< HEAD -======= - ->>>>>>> upstream/master if ( i == di ) { double val2 = -1; From f7adf2b87266d3877c05f324f3c72560ab8939fb Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 20 Feb 2025 14:14:16 +0200 Subject: [PATCH 62/72] Add files via upload --- src/nlr/tests/Test_LPRelaxation.h | 5792 +++++++++++++++++++++ src/nlr/tests/Test_NetworkLevelReasoner.h | 3753 +------------ 2 files changed, 5861 insertions(+), 3684 deletions(-) create mode 100644 src/nlr/tests/Test_LPRelaxation.h diff --git a/src/nlr/tests/Test_LPRelaxation.h b/src/nlr/tests/Test_LPRelaxation.h new file mode 100644 index 0000000000..9b58650b2d --- /dev/null +++ b/src/nlr/tests/Test_LPRelaxation.h @@ -0,0 +1,5792 @@ +/********************* */ +/*! \file Test_LPRelaxation.h + ** \verbatim + ** Top contributors (to current version): + ** Guy Katz, Andrew Wu + ** This file is part of the Marabou project. 
+ ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS + ** in the top-level source directory) and their institutional affiliations. + ** All rights reserved. See the file COPYING in the top-level source + ** directory for licensing information.\endverbatim + ** + ** [[ Add lengthier description here ]] + +**/ + +#include "../../engine/tests/MockTableau.h" // TODO: fix this +#include "DeepPolySoftmaxElement.h" +#include "FloatUtils.h" +#include "Layer.h" +#include "NetworkLevelReasoner.h" +#include "Options.h" +#include "PolygonalTightening.h" +#include "Query.h" +#include "Tightening.h" +#include "Vector.h" + +#include + +class MockForNetworkLevelReasoner +{ +public: +}; + +class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite +{ +public: + MockForNetworkLevelReasoner *mock; + + void setUp() + { + TS_ASSERT( mock = new MockForNetworkLevelReasoner ); + } + + void tearDown() + { + TS_ASSERT_THROWS_NOTHING( delete mock ); + } + + void populateNetworkBackwardReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 R -1 R -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ R / \ R / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 
2 of + https://dl.acm.org/doi/10.1145/3563325 + using ReLU activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + 
// Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardReLU2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = ReLU( x3 ) + x8 = ReLU( x4 ) + x9 = ReLU( x5 ) + x10 = ReLU( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = ReLU( x11 ) + x15 = ReLU( x12 ) + x16 = ReLU( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = ReLU( x17 ) + x20 = ReLU( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::RELU, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::RELU, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // 
Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the ReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 
8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + 
tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardSigmoid( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 S -1 S -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ S / \ S / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 2 of + https://dl.acm.org/doi/10.1145/3563325 + using Sigmoid activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardSigmoid2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Sigmoid( x3 ) + x8 = Sigmoid( x4 ) + x9 = Sigmoid( x5 ) + x10 = Sigmoid( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Sigmoid( x11 ) + x15 = Sigmoid( x12 ) 
+ x16 = Sigmoid( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Sigmoid( x17 ) + x20 = Sigmoid( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::SIGMOID, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::SIGMOID, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::SIGMOID, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the Sigmoid sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + 
nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + 
tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardSign( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 Sign -1 Sign -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ Sign / \ Sign / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 
2 of + https://dl.acm.org/doi/10.1145/3563325 + using Sign activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + 
tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardSign2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Sign( x3 ) + x8 = Sign( x4 ) + x9 = SIgn( x5 ) + x10 = Sign( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Sign( x11 ) + x15 = Sign( x12 ) + x16 = Sign( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Sign( x17 ) + x20 = Sign( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::SIGN, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::SIGN, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::SIGN, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 
1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the Sign sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + 
tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 Rnd -1 Rnd -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ Rnd / \ Rnd / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 2 of + https://dl.acm.org/doi/10.1145/3563325 + using Round activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the Round sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( 
NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardRound2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Round( x3 ) + x8 = Round( x4 ) + x9 = Round( x5 ) + x10 = Round( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Round( x11 ) + x15 = Round( x12 ) + x16 = Round( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Round( x17 ) 
+ x20 = Round( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::ROUND, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::ROUND, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::ROUND, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the Round sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + 
nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 
10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardAbs( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 A -1 A -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ A / \ A / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 
2 of + https://dl.acm.org/doi/10.1145/3563325 + using Absolute value activation instead of LeakyReLU + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + 
double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardAbs2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = Abs( x3 ) + x8 = Abs( x4 ) + x9 = Abs( x5 ) + x10 = Abs( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14 = Abs( x11 ) + x15 = Abs( x12 ) + x16 = Abs( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = Abs( x17 ) + x20 = Abs( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::ABSOLUTE_VALUE, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases 
for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the Abs sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + 
tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + + 1 LR -1 LR -1 2 + x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 + \ / \ / \ / + 1 \ / 2 \ / 1 \ / + \/ \/ \/ + /\ /\ /\ + -1 / \ 1 / \ -1 / \ + / \ LR / \ LR / \ + x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 + 1 -1 2 + + The example described in Fig. 2 of + https://dl.acm.org/doi/10.1145/3563325 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 1, 1, 0, -1 ); + nlr.setWeight( 0, 1, 1, 1, 1 ); + + nlr.setWeight( 2, 0, 3, 0, -1 ); + nlr.setWeight( 2, 0, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, -1 ); + nlr.setWeight( 4, 1, 5, 1, 2 ); + + nlr.setBias( 5, 1, 2 ); + + // Mark the LeakyReLU sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + // Variable indexing + 
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardLeakyRelu2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 x17 x19 + x1 x12 x15 x21 + x5 x9 x18 x20 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = LeakyReLU( x3 ) + x8 = LeakyReLU( x4 ) + x9 = LeakyReLU( x5 ) + x10 = LeakyReLU( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = 
x7 - x8 + 2x9 - 2 + + x14 = LeakyReLU( x11 ) + x15 = LeakyReLU( x12 ) + x16 = LeakyReLU( x13 ) + + x17 = -x14 + x15 - x16 + x18 = x14 + x15 + x16 + + x19 = LeakyReLU( x17 ) + x20 = LeakyReLU( x18 ) + + x21 = x19 - x20 - 1 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 3 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 6, NLR::Layer::LEAKY_RELU, 2 ); + nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); + + nlr.getLayer( 2 )->setAlpha( 0.1 ); + nlr.getLayer( 4 )->setAlpha( 0.1 ); + nlr.getLayer( 6 )->setAlpha( 0.1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 7; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + nlr.setWeight( 4, 0, 5, 1, 1 ); + nlr.setWeight( 4, 1, 5, 0, 1 ); + nlr.setWeight( 4, 1, 5, 1, 1 ); + nlr.setWeight( 4, 2, 5, 0, -1 ); + nlr.setWeight( 4, 2, 5, 1, 1 ); + + nlr.setWeight( 6, 0, 7, 0, 1 ); + nlr.setWeight( 6, 1, 7, 0, -1 ); + + nlr.setBias( 3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + nlr.setBias( 7, 0, -1 ); + + // Mark the LeakyReLU sources + nlr.addActivationSource( 
1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 5, 0, 6, 0 ); + nlr.addActivationSource( 5, 1, 6, 1 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 22 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + 
tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + } + + void populateNetworkBackwardSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + a a' + x e + b b' f h + y d + c c' + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::MAX, 1 ); + nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + 
nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the Softmax/Max sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + 
tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + + void populateNetworkBackwardSoftmaxAndMax2( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + x3 x7 + x0 x11 x14 + x4 x8 + x1 x12 x15 x17 + x5 x9 + x2 x13 x16 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7, x8, x9, x10 = Softmax( x2, x3, x4, x5 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + x13 = x7 - x8 + 2x9 - 2 + + x14, x15, x16 = Softmax( x11, x12, x13 ) + + x17 = Max( x14, x15, x16 ) + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::SOFTMAX, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 4, NLR::Layer::SOFTMAX, 3 ); + nlr.addLayer( 5, NLR::Layer::MAX, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 0, 3, 2, 1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 1, 3, 2, -1 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 2, 3, 2, 2 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setBias( 
3, 0, 2 ); + nlr.setBias( 3, 2, -2 ); + + // Mark the Softmax/Max sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 0 ); + nlr.addActivationSource( 1, 2, 2, 0 ); + nlr.addActivationSource( 1, 3, 2, 0 ); + nlr.addActivationSource( 1, 0, 2, 1 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 1 ); + nlr.addActivationSource( 1, 3, 2, 1 ); + nlr.addActivationSource( 1, 0, 2, 2 ); + nlr.addActivationSource( 1, 1, 2, 2 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 2 ); + nlr.addActivationSource( 1, 0, 2, 3 ); + nlr.addActivationSource( 1, 1, 2, 3 ); + nlr.addActivationSource( 1, 2, 2, 3 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + nlr.addActivationSource( 3, 2, 4, 0 ); + nlr.addActivationSource( 3, 0, 4, 1 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + nlr.addActivationSource( 3, 2, 4, 1 ); + nlr.addActivationSource( 3, 0, 4, 2 ); + nlr.addActivationSource( 3, 1, 4, 2 ); + nlr.addActivationSource( 3, 2, 4, 2 ); + + nlr.addActivationSource( 4, 0, 5, 0 ); + nlr.addActivationSource( 4, 1, 5, 0 ); + nlr.addActivationSource( 4, 2, 5, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + 
nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 18 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + } + + void populateNetworkBackwardReluAndBilinear( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + a a' + x e + b b' f h + y d + c c' + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); + nlr.addLayer( 2, NLR::Layer::RELU, 3 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); + nlr.addLayer( 5, 
NLR::Layer::WEIGHTED_SUM, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, 1 ); + nlr.setWeight( 0, 0, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 1, -3 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + + nlr.setWeight( 2, 0, 3, 0, 1 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 0, 1 ); + nlr.setWeight( 2, 1, 3, 1, 1 ); + nlr.setWeight( 2, 2, 3, 0, -1 ); + nlr.setWeight( 2, 2, 3, 1, -1 ); + + nlr.setWeight( 4, 0, 5, 0, -1 ); + + nlr.setBias( 1, 0, 1 ); + nlr.setBias( 3, 1, 2 ); + + // Mark the ReLU/Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 12 ); + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + 
tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + } + + void populateNetworkBackwardReluAndBilinear2( NLR::NetworkLevelReasoner &nlr, + MockTableau &tableau ) + { + /* + x3 x7 + x0 + x4 x8 x11 x13 + x1 x15 + x5 x9 x12 x14 + x2 + x6 x10 + + x3 = -x0 + x1 + x4 = x0 + 2*x1 + x5 = x0 + x1 + x2 + x6 = 3*x0 - 2*x1 - x2 + + x7 = ReLU( x3 ) + x8 = ReLU( x4 ) + x9 = ReLU( x5 ) + x10 = ReLU( x6 ) + + x11 = 2x7 + x9 - x10 + 2 + x12 = -x7 + 2x8 + x10 + + x13 = ReLU( x11 ) + x14 = ReLU( x12 ) + + x15 = x13 * x14 + */ + + // Create the layers + nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); + nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); + nlr.addLayer( 2, NLR::Layer::RELU, 4 ); + nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); + nlr.addLayer( 4, NLR::Layer::RELU, 2 ); + nlr.addLayer( 5, NLR::Layer::BILINEAR, 1 ); + + // Mark layer dependencies + for ( unsigned i = 1; i <= 5; ++i ) + nlr.addLayerDependency( i - 1, i ); + + // Set the weights and biases for the weighted sum layers + nlr.setWeight( 0, 0, 1, 0, -1 ); + nlr.setWeight( 0, 0, 1, 1, 1 ); + nlr.setWeight( 0, 0, 1, 2, 1 ); + nlr.setWeight( 0, 0, 1, 3, 3 ); + nlr.setWeight( 0, 1, 1, 0, 1 ); + nlr.setWeight( 0, 1, 1, 1, 2 ); + nlr.setWeight( 0, 1, 1, 2, 1 ); + nlr.setWeight( 0, 1, 1, 3, -2 ); + nlr.setWeight( 0, 2, 1, 2, 1 ); + nlr.setWeight( 0, 2, 1, 3, -1 ); + + nlr.setWeight( 2, 0, 3, 0, 2 ); + nlr.setWeight( 2, 0, 3, 1, -1 ); + nlr.setWeight( 2, 1, 3, 1, 2 ); + nlr.setWeight( 2, 2, 3, 0, 1 ); + nlr.setWeight( 2, 3, 3, 0, -1 ); + nlr.setWeight( 2, 3, 3, 1, 1 ); + + nlr.setBias( 3, 0, 2 ); + 
+ // Mark the ReLU/Bilinear sources + nlr.addActivationSource( 1, 0, 2, 0 ); + nlr.addActivationSource( 1, 1, 2, 1 ); + nlr.addActivationSource( 1, 2, 2, 2 ); + nlr.addActivationSource( 1, 3, 2, 3 ); + + nlr.addActivationSource( 3, 0, 4, 0 ); + nlr.addActivationSource( 3, 1, 4, 1 ); + + nlr.addActivationSource( 4, 0, 5, 0 ); + nlr.addActivationSource( 4, 1, 5, 0 ); + + // Variable indexing + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 13 ); + nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 14 ); + + nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 15 ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.getBoundManager().initialize( 16 ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + 
tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + } + + void test_backwards_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), + Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // 
Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 8, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), + + Tightening( 
8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), + + Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 8, 0, Tightening::LB ), + Tightening( 9, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + 
Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), + + Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), + Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), + + Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), + + Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), + + Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 19, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); 
+ tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 
-2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), + Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), + Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), + + Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), + Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), + Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), + + Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + + Tightening( 20, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_sigmoid() + { + 
Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSigmoid( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ), + Tightening( 5, 0.5, Tightening::LB ), Tightening( 5, 0.8808, Tightening::UB ), + + Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ), + Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ), + + Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ), + Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ), + + Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ), + Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the 
current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ), + Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ), + + Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ), + Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ), + + Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ), + Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ), + + Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ), + Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( 
nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_sigmoid2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSigmoid2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), + Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ), + Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ), + Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ), + + Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ), + Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 
2.5636, Tightening::UB ), + Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ), + + Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ), + Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ), + Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ), + + Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ), + Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ), + + Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ), + Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ), + + Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + 
tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ), + Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ), + Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ), + + Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ), + Tightening( 12, -0.9599, 
Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ), + Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ), + + Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ), + Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ), + Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ), + + Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ), + Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ), + + Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ), + Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ), + + Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_abs() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardAbs( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List 
expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + 
tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ), + + Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ), + + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_abs2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( 
&tableau ); + populateNetworkBackwardAbs2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( 
boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + 
tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 18, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ), + + Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ), + Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ), + Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ), + + Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ), + + Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( 
updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_round() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardRound( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + 
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 11, -4, Tightening::LB ), + Tightening( 11, 6, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), + Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ), + + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, 
Tightening::UB ), + Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), + + Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, -10, Tightening::LB ), + Tightening( 7, 5, Tightening::UB ), + + Tightening( 11, -14, Tightening::LB ), + Tightening( 11, 11, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_round2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardRound2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), 
Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ), + Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ), + Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ), + + Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ), + Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ), + + Tightening( 19, -28, Tightening::LB ), Tightening( 19, 28, Tightening::UB ), + Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ), + + Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + 
tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), 
Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ), + Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), + + Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ), + Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), + Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), + + Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ), + Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ), + + Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), + Tightening( 20, -24, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), + + Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_sign() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 
0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + 
tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_sign2() + { + 
Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 
20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); 
+ tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, 
Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_leaky_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + List bounds; + 
TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 4, -0.1, Tightening::LB ), + + Tightening( 8, -0.045, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, 
Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 4, -0.5, Tightening::LB ), + Tightening( 5, -0.4, Tightening::LB ), + + Tightening( 8, -0.4571, Tightening::LB ), + Tightening( 9, -1.1057, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_leaky_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyRelu2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + 
Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), + Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), + Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), + Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), + Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), + + Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), + Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), + + Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), + Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), + + Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 7, -0.2, Tightening::LB ), + Tightening( 8, -0.3, 
Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), + Tightening( 10, -0.6, Tightening::LB ), + + Tightening( 14, -0.9, Tightening::LB ), + Tightening( 15, -0.89, Tightening::LB ), + Tightening( 16, -0.77, Tightening::LB ), + + Tightening( 19, -2.3133, Tightening::LB ), + Tightening( 20, -1.2, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 
19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), + Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), + Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), + Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), + Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), + + Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), + Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), + + Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), + Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), + + Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( 
boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, -0.2, Tightening::LB ), + Tightening( 8, -0.5, Tightening::LB ), + Tightening( 9, -0.6, Tightening::LB ), + Tightening( 10, -1.5, Tightening::LB ), + + Tightening( 14, -1.1, Tightening::LB ), + Tightening( 15, -2.1771, Tightening::LB ), + Tightening( 16, -1.15, Tightening::LB ), + + Tightening( 19, -5.6259, Tightening::LB ), + Tightening( 20, -1.9, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_softmax_and_max() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSoftmaxAndMax( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ), + Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ), + Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 
0.4955, Tightening::UB ), + + Tightening( 8, -0.1870, Tightening::LB ), Tightening( 8, 1.4488, Tightening::UB ), + Tightening( 9, 0.6814, Tightening::LB ), Tightening( 9, 2.2432, Tightening::UB ), + + Tightening( 10, 0.6814, Tightening::LB ), Tightening( 10, 2.2432, Tightening::UB ), + + Tightening( 11, -2.2432, Tightening::LB ), Tightening( 11, -0.6814, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + 
List expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), + Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), + + Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), + Tightening( 9, 0.2194, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ), + + Tightening( 10, 0.2194, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ), + + Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.2194, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_softmax_and_max2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::SOFTMAX_BOUND_TYPE, "lse" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSoftmaxAndMax2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 
2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ), + Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ), + Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ), + Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ), + + Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9634, Tightening::UB ), + Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9152, Tightening::UB ), + Tightening( 13, -2.9868, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ), + + Tightening( 14, 0.1143, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ), + Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8694, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4596, Tightening::UB ), + + Tightening( 17, 0.1143, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + 
tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB 
), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ), + + Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9326, Tightening::UB ), + Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ), + Tightening( 13, -2.9990, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ), + + Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9970, Tightening::UB ), + Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ), + Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ), + + Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9970, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_relu_and_bilinear() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, 1, Tightening::LB 
), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ), + + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ), + + Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + 
tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), + + Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), + + Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_backwards_relu_and_bilinear2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-converge" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + 
populateNetworkBackwardReluAndBilinear2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ), + Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), + + Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), + } ); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( 
boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 
10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), + Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), + + Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke backward LP propagation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 
4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), + Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1.625, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, 
-large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), + + Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 11, 0.8472, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + 
NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReLU2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), + + Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), + Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), + Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), + + Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), + Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), + + Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), + Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), + + Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), + } ); + + 
List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 12, -1.75, Tightening::LB ), + Tightening( 13, -4.25, Tightening::LB ), + Tightening( 13, 3.25, Tightening::UB ), + + Tightening( 17, -11.1417, Tightening::LB ), + Tightening( 17, 10, Tightening::UB ), + + Tightening( 21, -17.3084, Tightening::LB ), + Tightening( 21, 3.2160, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + 
tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), + Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), + Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), + + Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), + Tightening( 18, -4, 
Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), + + Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), + Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), + + Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 11, -5, Tightening::LB ), + Tightening( 12, -4.6429, Tightening::LB ), + Tightening( 13, 8.5519, Tightening::UB ), + + Tightening( 17, -23.6231, Tightening::LB ), + Tightening( 17, 14.0909, Tightening::UB ), + Tightening( 18, 2, Tightening::LB ), + Tightening( 18, 28.2015, Tightening::UB ), + + Tightening( 21, -29.2015, Tightening::LB ), + Tightening( 21, 6.5734, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_leaky_relu() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyReLU( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, 
Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + + Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), + } ); + + List<Tightening> bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List<Tightening> expectedBounds2( { + Tightening( 7, -2, Tightening::LB ), + + Tightening( 10, -1.8, Tightening::LB ), + Tightening( 10, 0, Tightening::UB ), + + Tightening( 11, 1.4542, Tightening::LB ), + Tightening( 11, 1.4542, Tightening::LB ), /* NOTE(review): exact duplicate of the previous LB entry — the second entry was presumably intended as a Tightening::UB; confirm against the actual lpRelaxationPropagation output */ + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + 
tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + + Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), + Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), + + Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), + Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), + + Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), + Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 6, -2, Tightening::LB ), + Tightening( 6, 3.1, Tightening::UB ), + Tightening( 7, -3.2, 
Tightening::LB ), + Tightening( 7, 4, Tightening::UB ), + + Tightening( 8, 3.1, Tightening::UB ), + Tightening( 9, -0.32, Tightening::LB ), + + Tightening( 10, -3.8726, Tightening::LB ), + Tightening( 10, 0.03, Tightening::UB ), + Tightening( 11, 0.4074, Tightening::LB ), + Tightening( 11, 11.3243, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_leaky_relu2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardLeakyRelu2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), + Tightening( 12, -8.9, Tightening::LB ), Tightening( 
12, 9.8, Tightening::UB ), + Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), + + Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), + Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), + Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), + + Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), + Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), + + Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), + Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), + + Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 11, -4.5, Tightening::LB ), + Tightening( 11, 8.5, Tightening::UB ), + Tightening( 12, -2.225, Tightening::LB ), + Tightening( 13, 2.975, Tightening::UB ), + Tightening( 13, -4.175, Tightening::LB ), + + Tightening( 15, -0.2225, Tightening::LB ), + Tightening( 16, 2.975, Tightening::UB ), + Tightening( 16, -0.4175, Tightening::LB ), + + Tightening( 17, -11.452, Tightening::LB ), + Tightening( 17, 10.18, Tightening::UB ), + Tightening( 18, 0.87, Tightening::LB ), + Tightening( 18, 16.0688, Tightening::UB ), + + Tightening( 19, -1.1452, Tightening::LB ), + Tightening( 19, 10.18, Tightening::UB ), + Tightening( 20, 0.87, Tightening::LB ), + Tightening( 20, 16.0688, Tightening::UB ), + + Tightening( 21, -17.0684, Tightening::LB ), + Tightening( 21, 3.6767, Tightening::UB ), + } 
); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, -large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + 
TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), + Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), + Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), + + Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), + Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), + Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), + + Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), + Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), + + Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), + Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), + + Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( 
nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 11, -5.6, Tightening::LB ), + Tightening( 11, 16.4636, Tightening::UB ), + Tightening( 12, -6.0286, Tightening::LB ), + Tightening( 13, -5.9, Tightening::LB ), + Tightening( 13, 8.0468, Tightening::UB ), + + Tightening( 15, -0.6029, Tightening::LB ), + Tightening( 16, -0.59, Tightening::LB ), + Tightening( 16, 8.0468, Tightening::UB ), + + Tightening( 17, -24.8864, Tightening::LB ), + Tightening( 17, 14.3076, Tightening::UB ), + Tightening( 18, 0.75, Tightening::LB ), + Tightening( 18, 28.0272, Tightening::UB ), + + Tightening( 19, -2.4886, Tightening::LB ), + Tightening( 19, 14.3076, Tightening::UB ), + Tightening( 20, 0.75, Tightening::LB ), + Tightening( 20, 28.0272, Tightening::UB ), + + Tightening( 21, -29.9648, Tightening::LB ), + Tightening( 21, 6.9619, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_sign() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, 1, Tightening::LB ), Tightening( 
5, 1, Tightening::UB ), + + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), + Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + 
tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), + + Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), + Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), + + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), + + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + + Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), + Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_sign2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardSign2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 
1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + 
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + tableau.setLowerBound( 16, -large ); + tableau.setUpperBound( 16, large ); + tableau.setLowerBound( 17, -large ); + tableau.setUpperBound( 17, large ); + tableau.setLowerBound( 18, -large ); + tableau.setUpperBound( 18, large ); + tableau.setLowerBound( 19, -large ); + tableau.setUpperBound( 19, large ); + tableau.setLowerBound( 20, -large ); + tableau.setUpperBound( 20, large ); + tableau.setLowerBound( 21, 
-large ); + tableau.setUpperBound( 21, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), + Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), + + Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), + Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), + Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), + + Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), + Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), + + Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), + Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), + + Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), + Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), + + Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( 
nlr.lpRelaxationPropagation() ); + + List expectedBounds4( {} ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_relu_and_bilinear() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear( nlr, tableau ); + + tableau.setLowerBound( 0, 0 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, 0 ); + tableau.setUpperBound( 1, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), + Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), + + Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), + + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ), + Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ), + + Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ), + + Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( 
nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 11, -2.4149, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + + double large = 1000000; + tableau.setLowerBound( 2, -large ); + tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), + Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), + + Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + + Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), + 
Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), + + Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), + + Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 5.8235, Tightening::UB ), + + Tightening( 10, -14, Tightening::LB ), + + Tightening( 11, -24.8805, Tightening::LB ), + Tightening( 11, 14, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + void test_preimage_approximation_relu_and_bilinear2() + { + Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); + Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, + "backward-preimage-approx" ); + + NLR::NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkBackwardReluAndBilinear2( nlr, tableau ); + + tableau.setLowerBound( 0, -1 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 1 ); + tableau.setLowerBound( 2, -1 ); + tableau.setUpperBound( 2, 1 ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, 
Tightening::UB ), + Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), + + Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), + + Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), + Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), + + Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ), + Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), + + Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), + } ); + + List bounds, newBounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds2( { + Tightening( 12, -1.75, Tightening::LB ), + + Tightening( 15, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); + + + // Change the current bounds + tableau.setLowerBound( 0, -3 ); + tableau.setUpperBound( 0, 1 ); + tableau.setLowerBound( 1, -1 ); + tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -2 ); + tableau.setUpperBound( 2, 2 ); + + double large = 1000000; + tableau.setLowerBound( 3, -large ); + tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); + tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); + tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); + 
tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); + tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); + tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); + tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); + tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); + tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); + tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); + tableau.setUpperBound( 13, large ); + tableau.setLowerBound( 14, -large ); + tableau.setUpperBound( 14, large ); + tableau.setLowerBound( 15, -large ); + tableau.setUpperBound( 15, large ); + + + // Invoke DeepPoly + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); + + List expectedBounds3( { + Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), + Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), + Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), + Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), + + Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), + Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), + Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), + Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), + + Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), + Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), + + Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), + Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), + + Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); 
+ TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + + + // Invoke PreimageApproximation + TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + + List expectedBounds4( { + Tightening( 11, -5, Tightening::LB ), + Tightening( 12, -4.6429, Tightening::LB ), + + Tightening( 15, 0, Tightening::LB ), + } ); + + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); + bounds = removeRedundancies( bounds, newBounds ); + TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + } + + bool boundsEqual( const List &bounds, const List &expectedBounds ) + { + if ( bounds.size() < expectedBounds.size() ) + return false; + + bool allFound = true; + for ( const auto &bound : bounds ) + { + bool currentFound = false; + for ( const auto &expectedBound : expectedBounds ) + { + currentFound |= + ( bound._type == expectedBound._type && + bound._variable == expectedBound._variable && + FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) ); + } + allFound &= currentFound; + } + return allFound; + } + + // Create list of all tightenings in newBounds for which there is no bound in newBounds or in + // bounds which is at least as tight. 
+ List removeRedundancies( const List &bounds, + const List &newBounds ) + { + List minimalBounds; + + for ( const auto &newBound : newBounds ) + { + bool foundTighter = false; + for ( const auto &bound : bounds ) + { + foundTighter |= + ( newBound._type == bound._type && newBound._variable == bound._variable && + ( ( newBound._type == Tightening::LB && + FloatUtils::lte( newBound._value, bound._value, 0.0001 ) ) || + ( newBound._type == Tightening::UB && + FloatUtils::gte( newBound._value, bound._value, 0.0001 ) ) ) ); + } + + for ( const auto &bound : newBounds ) + { + foundTighter |= + ( newBound._type == bound._type && newBound._variable == bound._variable && + ( ( newBound._type == Tightening::LB && + FloatUtils::lt( newBound._value, bound._value, 0.0001 ) ) || + ( newBound._type == Tightening::UB && + FloatUtils::gt( newBound._value, bound._value, 0.0001 ) ) ) ); + } + + if ( !foundTighter ) + minimalBounds.append( newBound ); + } + return minimalBounds; + } + + void updateTableau( MockTableau &tableau, List &tightenings ) + { + for ( const auto &tightening : tightenings ) + { + if ( tightening._type == Tightening::LB ) + { + tableau.setLowerBound( tightening._variable, tightening._value ); + } + + if ( tightening._type == Tightening::UB ) + { + tableau.setUpperBound( tightening._variable, tightening._value ); + } + } + } +}; diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index 569ce5fcd4..7c46ae4317 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -1936,12 +1936,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite void populateNetworkBackwardReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* - -<<<<<<< HEAD - 1 R -1 R -1 -======= 1 R -1 R -1 2 ->>>>>>> upstream/master x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 \ / \ / \ / 1 \ / 2 \ / 1 \ / @@ -1950,11 +1945,7 @@ class NetworkLevelReasonerTestSuite : public 
CxxTest::TestSuite -1 / \ 1 / \ -1 / \ / \ R / \ R / \ x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 -<<<<<<< HEAD - 1 -1 2 2 -======= 1 -1 2 ->>>>>>> upstream/master The example described in Fig. 2 of https://dl.acm.org/doi/10.1145/3563325 @@ -2225,12 +2216,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite void populateNetworkBackwardSigmoid( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* - -<<<<<<< HEAD - 1 S -1 S -1 -======= 1 S -1 S -1 2 ->>>>>>> upstream/master x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 \ / \ / \ / 1 \ / 2 \ / 1 \ / @@ -2239,11 +2225,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -1 / \ 1 / \ -1 / \ / \ S / \ S / \ x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 -<<<<<<< HEAD - 1 -1 2 2 -======= 1 -1 2 ->>>>>>> upstream/master The example described in Fig. 2 of https://dl.acm.org/doi/10.1145/3563325 @@ -2511,12 +2493,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite void populateNetworkBackwardSign( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) { /* - -<<<<<<< HEAD - 1 Sign -1 Sign -1 -======= 1 Sign -1 Sign -1 2 ->>>>>>> upstream/master x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 \ / \ / \ / 1 \ / 2 \ / 1 \ / @@ -2525,11 +2502,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -1 / \ 1 / \ -1 / \ / \ Sign / \ Sign / \ x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 -<<<<<<< HEAD - 1 -1 2 2 -======= 1 -1 2 ->>>>>>> upstream/master The example described in Fig. 
2 of https://dl.acm.org/doi/10.1145/3563325 @@ -2798,11 +2771,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite { /* -<<<<<<< HEAD - 1 Rnd -1 Rnd -1 -======= 1 Rnd -1 Rnd -1 2 ->>>>>>> upstream/master x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 \ / \ / \ / 1 \ / 2 \ / 1 \ / @@ -2811,11 +2780,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -1 / \ 1 / \ -1 / \ / \ Rnd / \ Rnd / \ x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 -<<<<<<< HEAD - 1 -1 2 2 -======= 1 -1 2 ->>>>>>> upstream/master The example described in Fig. 2 of https://dl.acm.org/doi/10.1145/3563325 @@ -3084,11 +3049,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite { /* -<<<<<<< HEAD - 1 A -1 A -1 -======= 1 A -1 A -1 2 ->>>>>>> upstream/master x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 \ / \ / \ / 1 \ / 2 \ / 1 \ / @@ -3097,11 +3058,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -1 / \ 1 / \ -1 / \ / \ A / \ A / \ x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 -<<<<<<< HEAD - 1 -1 2 2 -======= 1 -1 2 ->>>>>>> upstream/master The example described in Fig. 2 of https://dl.acm.org/doi/10.1145/3563325 @@ -3370,11 +3327,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite { /* -<<<<<<< HEAD - 1 LR -1 LR -1 -======= 1 LR -1 LR -1 2 ->>>>>>> upstream/master x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 \ / \ / \ / 1 \ / 2 \ / 1 \ / @@ -3383,11 +3336,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite -1 / \ 1 / \ -1 / \ / \ LR / \ LR / \ x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 -<<<<<<< HEAD - 1 -1 2 2 -======= 1 -1 2 ->>>>>>> upstream/master The example described in Fig. 
2 of https://dl.acm.org/doi/10.1145/3563325 @@ -4904,7 +4853,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); // case 2 input[0] = 1; @@ -4914,7 +4862,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } void test_store_into_other_with_softmax() @@ -4974,7 +4921,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); // case 2 input[0] = 1; @@ -4984,7 +4930,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr2.evaluate( input, output2 ) ); TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) ); - TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) ); } void test_generate_input_query() @@ -9934,3592 +9879,96 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( bounds.exists( bound ) ); } - void test_backwards_relu() + void test_get_previous_bias() { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReLU( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( 
nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), - Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 8, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large 
); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), - - Tightening( 10, -4.0489, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), - } ); + populateNetwork( nlr ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); + // Generate query to create ReLU constraints + Query query; + nlr.generateQuery( query ); + // Find ReLU constraints from the query + List constraints = query.getPiecewiseLinearConstraints(); - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + for ( const auto &constraint : constraints ) + { + ReluConstraint *relu = dynamic_cast( constraint ); + TS_ASSERT( relu ); - List expectedBounds4( { - 
Tightening( 8, 0, Tightening::LB ), - Tightening( 9, 0, Tightening::LB ), - } ); + nlr.addConstraintInTopologicalOrder( relu ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + // First ReLU layer (nodes 2,0 through 2,2) has previous bias 1 + if ( relu->getB() == 3 || relu->getB() == 5 || relu->getB() == 7 ) + { + TS_ASSERT_EQUALS( nlr.getPreviousBias( relu ), 1 ); + } + // Second ReLU layer (nodes 4,0 and 4,1) has previous bias 2 + else if ( relu->getB() == 9 || relu->getB() == 11 ) + { + TS_ASSERT_EQUALS( nlr.getPreviousBias( relu ), 2 ); + } + } } - void test_backwards_relu2() + void test_get_previous_bias_error_handling() { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReLU2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - 
Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), - - Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), - Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), - Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), - - Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), - - Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), - - Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 14, 0, Tightening::LB ), - Tightening( 15, 0, Tightening::LB ), - Tightening( 16, 0, Tightening::LB ), - - Tightening( 19, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - 
tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - 
Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), - - Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), - Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), - Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), - - Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), - Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), - Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), - - Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - + populateNetwork( nlr ); - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); + // Generate invalid ReLU constraint + ReluConstraint invalidRelu( 15, 16 ); // Variables not in network - List expectedBounds4( { - Tightening( 7, 0, Tightening::LB ), + // Should throw since variables don't exist in network + TS_ASSERT_THROWS_EQUALS( nlr.getPreviousBias( &invalidRelu ), + const NLRError &e, + e.getCode(), + NLRError::RELU_NOT_FOUND ); - Tightening( 14, 0, Tightening::LB ), - Tightening( 15, 0, Tightening::LB ), - Tightening( 16, 0, Tightening::LB ), + // Test missing activation source using fresh network + NLR::NetworkLevelReasoner nlrNoActivations; + // Create minimal network without 
activation sources + nlrNoActivations.addLayer( 0, NLR::Layer::INPUT, 2 ); + nlrNoActivations.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); + nlrNoActivations.addLayer( 2, NLR::Layer::RELU, 2 ); - Tightening( 20, 0, Tightening::LB ), - } ); + ReluConstraint missingActivation( 2, 3 ); - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); + TS_ASSERT_THROWS_EQUALS( nlrNoActivations.getPreviousBias( &missingActivation ), + const NLRError &e, + e.getCode(), + NLRError::RELU_NOT_FOUND ); } - void test_backwards_sigmoid() + bool boundsEqual( const List &bounds, const List &expectedBounds ) { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSigmoid( nlr, tableau ); + if ( bounds.size() < expectedBounds.size() ) + return false; - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); + bool allFound = true; + for ( const auto &bound : bounds ) + { + bool currentFound = false; + for ( const auto &expectedBound : expectedBounds ) + { + currentFound |= + ( bound._type == expectedBound._type && + bound._variable == expectedBound._variable && + FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) ); + } + allFound &= currentFound; + } + return allFound; + } - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 0.2689, Tightening::LB ), Tightening( 4, 0.7311, Tightening::UB ), - Tightening( 5, 0.5, Tightening::LB 
), Tightening( 5, 0.8808, Tightening::UB ), - - Tightening( 6, -0.1261, Tightening::LB ), Tightening( 6, 0.5069, Tightening::UB ), - Tightening( 7, -0.2379, Tightening::LB ), Tightening( 7, 0.8571, Tightening::UB ), - - Tightening( 8, 0.4685, Tightening::LB ), Tightening( 8, 0.6241, Tightening::UB ), - Tightening( 9, 0.4408, Tightening::LB ), Tightening( 9, 0.7021, Tightening::UB ), - - Tightening( 10, -1.1819, Tightening::LB ), Tightening( 10, -1.0535, Tightening::UB ), - Tightening( 11, 3.4986, Tightening::LB ), Tightening( 11, 3.8797, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, 
-large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, 0.0066, Tightening::LB ), Tightening( 4, 0.8807, Tightening::UB ), - Tightening( 5, 0.0179, Tightening::LB ), Tightening( 5, 0.9526, Tightening::UB ), - - Tightening( 6, -0.8362, Tightening::LB ), Tightening( 6, 0.9193, Tightening::UB ), - Tightening( 7, -0.8860, Tightening::LB ), Tightening( 7, 1.6904, Tightening::UB ), - - Tightening( 8, 0.3023, Tightening::LB ), Tightening( 8, 0.7148, Tightening::UB ), - Tightening( 9, 0.2919, Tightening::LB ), Tightening( 9, 0.8443, Tightening::UB ), - - Tightening( 10, -1.2694, Tightening::LB ), Tightening( 10, -0.8841, Tightening::UB ), - Tightening( 11, 3.2396, Tightening::LB ), Tightening( 11, 4.05, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_sigmoid2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSigmoid2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - 
tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.8808, Tightening::UB ), - Tightening( 8, 0.0474, Tightening::LB ), Tightening( 8, 0.9526, Tightening::UB ), - Tightening( 9, 0.0474, Tightening::LB ), Tightening( 9, 0.9526, Tightening::UB ), - Tightening( 10, 0.0025, Tightening::LB ), Tightening( 10, 0.9975, Tightening::UB ), - - Tightening( 11, 1.3787, Tightening::LB ), Tightening( 11, 4.6213, Tightening::UB ), - Tightening( 12, -0.5636, Tightening::LB ), Tightening( 12, 2.5636, Tightening::UB ), - Tightening( 13, -2.3771, Tightening::LB ), Tightening( 13, 0.3771, Tightening::UB ), - - Tightening( 14, 0.7988, Tightening::LB ), Tightening( 14, 0.9903, Tightening::UB ), - Tightening( 15, 0.3627, Tightening::LB ), Tightening( 15, 0.9285, Tightening::UB ), - Tightening( 16, 0.0849, Tightening::LB ), Tightening( 16, 0.5932, Tightening::UB ), - - Tightening( 17, -1.2113, Tightening::LB ), Tightening( 17, 0.0354, Tightening::UB ), - Tightening( 18, 1.4027, Tightening::LB ), Tightening( 18, 2.4177, Tightening::UB ), - - Tightening( 19, 0.2295, Tightening::LB ), Tightening( 19, 0.5088, Tightening::UB ), - Tightening( 20, 0.8026, Tightening::LB ), Tightening( 20, 0.9182, Tightening::UB ), - - Tightening( 21, -1.6539, Tightening::LB ), Tightening( 21, -1.3393, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - 
TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - 
tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0.1192, Tightening::LB ), Tightening( 7, 0.9933, Tightening::UB ), - Tightening( 8, 0.0067, Tightening::LB ), Tightening( 8, 0.9933, Tightening::UB ), - Tightening( 9, 0.0025, Tightening::LB ), Tightening( 9, 0.9933, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9991, Tightening::UB ), - - Tightening( 11, 1.2517, Tightening::LB ), Tightening( 11, 4.9701, Tightening::UB ), - Tightening( 12, -0.9599, Tightening::LB ), Tightening( 12, 2.8466, Tightening::UB ), - Tightening( 13, -2.8147, Tightening::LB ), Tightening( 13, 0.9188, Tightening::UB ), - - Tightening( 14, 0.7776, Tightening::LB ), Tightening( 14, 0.9931, Tightening::UB ), - Tightening( 15, 0.2769, Tightening::LB ), Tightening( 15, 0.9451, Tightening::UB ), - Tightening( 16, 0.0565, Tightening::LB ), Tightening( 16, 0.7148, Tightening::UB ), - - Tightening( 17, -1.4307, Tightening::LB ), Tightening( 17, 0.1083, Tightening::UB ), - Tightening( 18, 1.2592, Tightening::LB ), Tightening( 18, 2.5519, Tightening::UB ), - - Tightening( 19, 0.1929, Tightening::LB ), Tightening( 19, 0.5270, Tightening::UB ), - Tightening( 20, 0.7789, Tightening::LB ), Tightening( 20, 0.9277, Tightening::UB ), - - Tightening( 21, -1.6967, Tightening::LB ), Tightening( 21, -1.3115, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - 
TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_abs() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardAbs( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), - - Tightening( 10, -4, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 2, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // 
Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 4, Tightening::UB ), - - Tightening( 6, -5, Tightening::LB ), Tightening( 6, 4, Tightening::UB ), - Tightening( 7, -4, Tightening::LB ), Tightening( 7, 10, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 
5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 10, Tightening::UB ), - - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 2, Tightening::LB ), Tightening( 11, 27, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_abs2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardAbs2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, 
Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -4, Tightening::LB ), Tightening( 11, 9, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), - Tightening( 13, -5, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), - - Tightening( 14, 0, Tightening::LB ), Tightening( 14, 9, Tightening::UB ), - Tightening( 15, 0, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), - Tightening( 16, 0, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), - - Tightening( 17, -15, Tightening::LB ), Tightening( 17, 12, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 27, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 15, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 27, Tightening::UB ), - - Tightening( 21, -28, Tightening::LB ), Tightening( 21, 14, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, 
-large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 15, Tightening::UB ), - - Tightening( 11, -13, 
Tightening::LB ), Tightening( 11, 18, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 25, Tightening::UB ), - Tightening( 13, -7, Tightening::LB ), Tightening( 13, 15, Tightening::UB ), - - Tightening( 14, 0, Tightening::LB ), Tightening( 14, 18, Tightening::UB ), - Tightening( 15, 0, Tightening::LB ), Tightening( 15, 25, Tightening::UB ), - Tightening( 16, 0, Tightening::LB ), Tightening( 16, 15, Tightening::UB ), - - Tightening( 17, -33, Tightening::LB ), Tightening( 17, 25, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 58, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 58, Tightening::UB ), - - Tightening( 21, -59, Tightening::LB ), Tightening( 21, 32, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_round() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardRound( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List 
expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -4, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -4, Tightening::LB ), Tightening( 9, 2, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 6.5, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 11, -4, Tightening::LB ), - Tightening( 11, 6, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large 
); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -3, Tightening::LB ), Tightening( 6, 5, Tightening::UB ), - Tightening( 7, -10.5, Tightening::LB ), Tightening( 7, 5.5, Tightening::UB ), - - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, -10, Tightening::LB ), Tightening( 9, 6, Tightening::UB ), - - Tightening( 10, -3, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - Tightening( 11, -15.5, Tightening::LB ), Tightening( 11, 11.5, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 7, -10, Tightening::LB ), - Tightening( 7, 5, Tightening::UB ), - - Tightening( 11, -14, Tightening::LB ), - Tightening( 11, 11, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void 
test_backwards_round2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardRound2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -11, Tightening::LB ), Tightening( 11, 15, Tightening::UB ), - Tightening( 12, -10, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - Tightening( 13, -7, Tightening::LB ), Tightening( 13, 3, Tightening::UB ), - - Tightening( 14, -11, Tightening::LB ), Tightening( 14, 15, Tightening::UB ), - Tightening( 15, -10, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), - Tightening( 16, -7, Tightening::LB ), Tightening( 16, 3, Tightening::UB ), - - Tightening( 17, -27.5, Tightening::LB ), Tightening( 17, 27.5, Tightening::UB ), - Tightening( 18, -16.5, Tightening::LB ), Tightening( 18, 16.5, Tightening::UB ), - - Tightening( 19, -28, Tightening::LB ), 
Tightening( 19, 28, Tightening::UB ), - Tightening( 20, -17, Tightening::LB ), Tightening( 20, 17, Tightening::UB ), - - Tightening( 21, -38, Tightening::LB ), Tightening( 21, 36, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - 
tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -13, Tightening::LB ), Tightening( 11, 30, Tightening::UB ), - Tightening( 12, -23, Tightening::LB ), Tightening( 12, 12, Tightening::UB ), - Tightening( 13, -9, Tightening::LB ), Tightening( 13, 6, Tightening::UB ), - - Tightening( 14, -13, Tightening::LB ), Tightening( 14, 30, Tightening::UB ), - Tightening( 15, -23, Tightening::LB ), Tightening( 15, 12, Tightening::UB ), - Tightening( 16, -9, Tightening::LB ), Tightening( 16, 6, Tightening::UB ), - - Tightening( 17, -57.5, Tightening::LB ), Tightening( 17, 32.5, Tightening::UB ), - Tightening( 18, -23.5, Tightening::LB ), Tightening( 18, 26.5, Tightening::UB ), - - Tightening( 19, -58, Tightening::LB ), Tightening( 19, 33, Tightening::UB ), - Tightening( 20, -24, Tightening::LB ), 
Tightening( 20, 27, Tightening::UB ), - - Tightening( 21, -74, Tightening::LB ), Tightening( 21, 44, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_sign() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSign( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 1, 
Tightening::LB ), Tightening( 11, 5, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, 
Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_sign2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSign2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, 
Tightening::UB ), - - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), - Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), - - Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), - Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), - Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), - - Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), - Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), - - Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), - Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), - - Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, 
-large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, -1, 
Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), - Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB ), - - Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), - Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), - Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), - - Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), - Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), - - Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), - Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), - - Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_leaky_relu() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardLeakyReLU( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 
0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 4, -0.1, Tightening::LB ), - - Tightening( 8, -0.045, Tightening::LB ), - Tightening( 9, -0.3, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - 
tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -4.5714, Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), - Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), - - Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), - Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), - - Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), - Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List 
expectedBounds4( { - Tightening( 4, -0.5, Tightening::LB ), - Tightening( 5, -0.4, Tightening::LB ), - - Tightening( 8, -0.4571, Tightening::LB ), - Tightening( 9, -1.1057, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_leaky_relu2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardLeakyRelu2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), - Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), - Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), - - Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), - 
Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), - Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), - - Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), - Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), - - Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), - Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), - - Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 7, -0.2, Tightening::LB ), - Tightening( 8, -0.3, Tightening::LB ), - Tightening( 9, -0.3, Tightening::LB ), - Tightening( 10, -0.6, Tightening::LB ), - - Tightening( 14, -0.9, Tightening::LB ), - Tightening( 15, -0.89, Tightening::LB ), - Tightening( 16, -0.77, Tightening::LB ), - - Tightening( 19, -2.3133, Tightening::LB ), - Tightening( 20, -1.2, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, 
large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, 
Tightening::UB ), - - Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), - Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), - Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), - - Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB ), - Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), - Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), - - Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), - Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), - - Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), - Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), - - Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 7, -0.2, Tightening::LB ), - Tightening( 8, -0.5, Tightening::LB ), - Tightening( 9, -0.6, Tightening::LB ), - Tightening( 10, -1.5, Tightening::LB ), - - Tightening( 14, -1.1, Tightening::LB ), - Tightening( 15, -2.1771, Tightening::LB ), - Tightening( 16, -1.15, Tightening::LB ), - - Tightening( 19, -5.6259, Tightening::LB ), - Tightening( 20, -1.9, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_softmax_and_max() - { - Options::get()->setString( 
Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSoftmaxAndMax( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke SBT - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 0.2119, Tightening::LB ), Tightening( 3, 0.8756, Tightening::UB ), - Tightening( 5, 0.0049, Tightening::LB ), Tightening( 5, 0.6652, Tightening::UB ), - Tightening( 7, 0.0634, Tightening::LB ), Tightening( 7, 0.4955, Tightening::UB ), - - Tightening( 8, -0.1519, Tightening::LB ), Tightening( 8, 1.4775, Tightening::UB ), - Tightening( 9, 0.6614, Tightening::LB ), Tightening( 9, 2.3899, Tightening::UB ), - - Tightening( 10, 0.6614, Tightening::LB ), Tightening( 10, 2.3899, Tightening::UB ), - - Tightening( 11, -2.3899, Tightening::LB ), Tightening( 11, -0.6614, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - 
tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0.0009, Tightening::LB ), Tightening( 3, 0.9526, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0.9966, Tightening::UB ), - Tightening( 7, 0.0024, Tightening::LB ), Tightening( 7, 0.9820, Tightening::UB ), - - Tightening( 8, -0.9811, Tightening::LB ), Tightening( 8, 1.9468, Tightening::UB ), - Tightening( 9, 0.0674, Tightening::LB ), Tightening( 9, 2.9934, Tightening::UB ), - - Tightening( 10, 0.0674, Tightening::LB ), Tightening( 10, 2.9934, Tightening::UB ), - - Tightening( 11, -2.9934, Tightening::LB ), Tightening( 11, -0.0674, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( 
boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_softmax_and_max2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSoftmaxAndMax2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0.0003, Tightening::LB ), Tightening( 7, 0.9864, Tightening::UB ), - Tightening( 8, 0.0001, Tightening::LB ), Tightening( 8, 0.9907, Tightening::UB ), - Tightening( 9, 0.0001, Tightening::LB ), Tightening( 9, 0.9907, Tightening::UB ), - Tightening( 10, 0.0001, Tightening::LB ), Tightening( 10, 0.9994, Tightening::UB ), - - Tightening( 11, 1.0013, Tightening::LB ), Tightening( 11, 4.9635, Tightening::UB ), - Tightening( 12, -0.9861, Tightening::LB ), Tightening( 12, 2.9806, Tightening::UB ), - Tightening( 13, 
-2.9902, Tightening::LB ), Tightening( 13, 0.9678, Tightening::UB ), - - Tightening( 14, 0.1086, Tightening::LB ), Tightening( 14, 0.9971, Tightening::UB ), - Tightening( 15, 0.0026, Tightening::LB ), Tightening( 15, 0.8766, Tightening::UB ), - Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4595, Tightening::UB ), - - Tightening( 17, 0.1086, Tightening::LB ), Tightening( 17, 0.9971, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, 
-large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, 0.0001, Tightening::LB ), Tightening( 7, 0.9999, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 0.9991, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 0.9990, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 0.9999, Tightening::UB ), - - Tightening( 11, 1.0003, Tightening::LB ), Tightening( 11, 4.9989, Tightening::UB ), - Tightening( 12, -0.9999, Tightening::LB ), Tightening( 12, 2.9979, Tightening::UB ), - Tightening( 13, -2.9989, Tightening::LB ), Tightening( 13, 0.9980, Tightening::UB ), - - Tightening( 14, 0.1067, Tightening::LB ), Tightening( 14, 0.9972, Tightening::UB ), - Tightening( 15, 0.0024, Tightening::LB ), Tightening( 15, 0.8786, Tightening::UB ), - Tightening( 16, 0.0003, Tightening::LB ), Tightening( 16, 0.4677, Tightening::UB ), - - Tightening( 17, 0.1067, Tightening::LB ), Tightening( 17, 0.9972, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - 
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_relu_and_bilinear() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReluAndBilinear( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ), - - Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ), - - Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( 
nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), - Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), - - Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), - - 
Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 7, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_backwards_relu_and_bilinear2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-converge" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReluAndBilinear2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, 
-4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ), - Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), - - Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), - } ); - - List bounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 13, 0, Tightening::LB ), - Tightening( 14, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, 
large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), - - Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), - Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), - - Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke backward LP propagation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 7, 0, Tightening::LB ), - - Tightening( 13, 0, Tightening::LB ), - Tightening( 14, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( 
nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - -<<<<<<< HEAD - void test_preimage_approximation_relu() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-preimage-approx" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReLU( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.5, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -0.5, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0.5, Tightening::UB ), - Tightening( 11, 1.5, Tightening::LB ), Tightening( 11, 4.4, Tightening::UB ), - } ); - - List bounds, newBounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 10, 0, Tightening::UB ), - 
Tightening( 11, 1.625, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, 0, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 4, Tightening::UB ), - - Tightening( 10, -4.0489, Tightening::LB ), 
Tightening( 10, 0, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 10, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 11, 0.8472, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_preimage_approximation_relu2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-preimage-approx" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReLU2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - 
Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - Tightening( 13, -5, Tightening::LB ), Tightening( 13, 5, Tightening::UB ), - - Tightening( 14, -4, Tightening::LB ), Tightening( 14, 8, Tightening::UB ), - Tightening( 15, -2, Tightening::LB ), Tightening( 15, 10, Tightening::UB ), - Tightening( 16, -5, Tightening::LB ), Tightening( 16, 5, Tightening::UB ), - - Tightening( 17, -14.5, Tightening::LB ), Tightening( 17, 17, Tightening::UB ), - Tightening( 18, 0, Tightening::LB ), Tightening( 18, 17.1667, Tightening::UB ), - - Tightening( 19, -14.5, Tightening::LB ), Tightening( 19, 17, Tightening::UB ), - Tightening( 20, 0, Tightening::LB ), Tightening( 20, 17.1667, Tightening::UB ), - - Tightening( 21, -26, Tightening::LB ), Tightening( 21, 13.9206, Tightening::UB ), - } ); - - List bounds, newBounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 12, -1.75, Tightening::LB ), - Tightening( 13, -4.25, Tightening::LB ), - Tightening( 13, 3.25, Tightening::UB ), - - Tightening( 17, -11.1417, Tightening::LB ), - Tightening( 17, 10, Tightening::UB ), - - Tightening( 21, -17.3084, Tightening::LB ), - Tightening( 21, 3.2160, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - 
tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, 
Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 10.1429, Tightening::UB ), - - Tightening( 14, -9, Tightening::LB ), Tightening( 14, 15.1818, Tightening::UB ), - Tightening( 15, -5, Tightening::LB ), Tightening( 15, 14.0909, Tightening::UB ), - Tightening( 16, -6, Tightening::LB ), Tightening( 16, 10.1429, Tightening::UB ), - - Tightening( 17, -29.8351, Tightening::LB ), Tightening( 17, 28.2857, Tightening::UB ), - Tightening( 18, -4, Tightening::LB ), Tightening( 18, 29.6479, Tightening::UB ), - - Tightening( 19, 0, Tightening::LB ), Tightening( 19, 28.2857, Tightening::UB ), - Tightening( 20, -4, Tightening::LB ), Tightening( 20, 29.6479, Tightening::UB ), - - Tightening( 21, -30.6479, Tightening::LB ), Tightening( 21, 29.1467, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 11, -5, Tightening::LB ), - Tightening( 12, -4.6429, Tightening::LB ), - Tightening( 13, 8.5519, Tightening::UB ), - - Tightening( 17, -23.6231, Tightening::LB ), - Tightening( 17, 14.0909, Tightening::UB ), - Tightening( 18, 2, Tightening::LB ), - Tightening( 18, 28.2015, Tightening::UB ), - - Tightening( 21, 
-29.2015, Tightening::LB ), - Tightening( 21, 6.5734, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_preimage_approximation_leaky_relu() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-preimage-approx" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardLeakyReLU( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - - Tightening( 6, -0.45, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, -0.45, Tightening::LB ), Tightening( 8, 2, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2.025, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 4.3306, Tightening::UB ), - } ); - - List bounds, newBounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - 
TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 7, -2, Tightening::LB ), - - Tightening( 10, -1.8, Tightening::LB ), - Tightening( 10, 0, Tightening::UB ), - - Tightening( 11, 1.4542, Tightening::LB ), - Tightening( 11, 1.4542, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 2, Tightening::UB ), - Tightening( 5, -4, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - - Tightening( 6, -4.5714, 
Tightening::LB ), Tightening( 6, 6.0571, Tightening::UB ), - Tightening( 7, -11.0571, Tightening::LB ), Tightening( 7, 5.1429, Tightening::UB ), - - Tightening( 8, -4.5714, Tightening::LB ), Tightening( 8, 6.0571, Tightening::UB ), - Tightening( 9, -11.0571, Tightening::LB ), Tightening( 9, 5.1429, Tightening::UB ), - - Tightening( 10, -6.3327, Tightening::LB ), Tightening( 10, 5, Tightening::UB ), - Tightening( 11, -14.0571, Tightening::LB ), Tightening( 11, 12.523, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 6, -2, Tightening::LB ), - Tightening( 6, 3.1, Tightening::UB ), - Tightening( 7, -3.2, Tightening::LB ), - Tightening( 7, 4, Tightening::UB ), - - Tightening( 8, 3.1, Tightening::UB ), - Tightening( 9, -0.32, Tightening::LB ), - - Tightening( 10, -3.8726, Tightening::LB ), - Tightening( 10, 0.03, Tightening::UB ), - Tightening( 11, 0.4074, Tightening::LB ), - Tightening( 11, 11.3243, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_preimage_approximation_leaky_relu2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-preimage-approx" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardLeakyRelu2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 
); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, -3, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, -3, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, -6, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 13.9, Tightening::UB ), - Tightening( 12, -8.9, Tightening::LB ), Tightening( 12, 9.8, Tightening::UB ), - Tightening( 13, -7.7, Tightening::LB ), Tightening( 13, 3.5, Tightening::UB ), - - Tightening( 14, -9, Tightening::LB ), Tightening( 14, 13.9, Tightening::UB ), - Tightening( 15, -8.9, Tightening::LB ), Tightening( 15, 9.8, Tightening::UB ), - Tightening( 16, -7.7, Tightening::LB ), Tightening( 16, 3.5, Tightening::UB ), - - Tightening( 17, -23.1331, Tightening::LB ), Tightening( 17, 25.4857, Tightening::UB ), - Tightening( 18, -12, Tightening::LB ), Tightening( 18, 19.3146, Tightening::UB ), - - Tightening( 19, -23.1331, Tightening::LB ), Tightening( 19, 25.4857, Tightening::UB ), - Tightening( 20, -12, Tightening::LB ), Tightening( 20, 19.3146, Tightening::UB ), - - Tightening( 21, -38.0879, Tightening::LB ), Tightening( 21, 30.6367, Tightening::UB ), - } ); - - List bounds, newBounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( 
updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 11, -4.5, Tightening::LB ), - Tightening( 11, 8.5, Tightening::UB ), - Tightening( 12, -2.225, Tightening::LB ), - Tightening( 13, 2.975, Tightening::UB ), - Tightening( 13, -4.175, Tightening::LB ), - - Tightening( 15, -0.2225, Tightening::LB ), - Tightening( 16, 2.975, Tightening::UB ), - Tightening( 16, -0.4175, Tightening::LB ), - - Tightening( 17, -11.452, Tightening::LB ), - Tightening( 17, 10.18, Tightening::UB ), - Tightening( 18, 0.87, Tightening::LB ), - Tightening( 18, 16.0688, Tightening::UB ), - - Tightening( 19, -1.1452, Tightening::LB ), - Tightening( 19, 10.18, Tightening::UB ), - Tightening( 20, 0.87, Tightening::LB ), - Tightening( 20, 16.0688, Tightening::UB ), - - Tightening( 21, -17.0684, Tightening::LB ), - Tightening( 21, 3.6767, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - 
tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, -5, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, -6, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, -15, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -11, Tightening::LB ), Tightening( 11, 29.9636, Tightening::UB ), - Tightening( 12, -21.7714, Tightening::LB ), Tightening( 12, 13.6818, Tightening::UB ), - Tightening( 13, -11.5, Tightening::LB ), Tightening( 13, 8.6442, Tightening::UB ), - - Tightening( 14, -11, Tightening::LB ), Tightening( 14, 29.9636, Tightening::UB 
), - Tightening( 15, -21.7714, Tightening::LB ), Tightening( 15, 13.6818, Tightening::UB ), - Tightening( 16, -11.5, Tightening::LB ), Tightening( 16, 8.6442, Tightening::UB ), - - Tightening( 17, -56.2592, Tightening::LB ), Tightening( 17, 33.8084, Tightening::UB ), - Tightening( 18, -19, Tightening::LB ), Tightening( 18, 38.5043, Tightening::UB ), - - Tightening( 19, -56.2592, Tightening::LB ), Tightening( 19, 33.8084, Tightening::UB ), - Tightening( 20, -19, Tightening::LB ), Tightening( 20, 38.5043, Tightening::UB ), - - Tightening( 21, -82.9440, Tightening::LB ), Tightening( 21, 40.7983, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 11, -5.6, Tightening::LB ), - Tightening( 11, 16.4636, Tightening::UB ), - Tightening( 12, -6.0286, Tightening::LB ), - Tightening( 13, -5.9, Tightening::LB ), - Tightening( 13, 8.0468, Tightening::UB ), - - Tightening( 15, -0.6029, Tightening::LB ), - Tightening( 16, -0.59, Tightening::LB ), - Tightening( 16, 8.0468, Tightening::UB ), - - Tightening( 17, -24.8864, Tightening::LB ), - Tightening( 17, 14.3076, Tightening::UB ), - Tightening( 18, 0.75, Tightening::LB ), - Tightening( 18, 28.0272, Tightening::UB ), - - Tightening( 19, -2.4886, Tightening::LB ), - Tightening( 19, 14.3076, Tightening::UB ), - Tightening( 20, 0.75, Tightening::LB ), - Tightening( 20, 28.0272, Tightening::UB ), - - Tightening( 21, -29.9648, Tightening::LB ), - Tightening( 21, 6.9619, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - 
void test_preimage_approximation_sign() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-preimage-approx" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSign( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, -1, Tightening::LB ), Tightening( 2, 1, Tightening::UB ), - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, 1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), - Tightening( 11, 1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), - } ); - - List bounds, newBounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, 
expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -5, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 3, -4, Tightening::LB ), Tightening( 3, 3, Tightening::UB ), - - Tightening( 4, -1, Tightening::LB ), Tightening( 4, 1, Tightening::UB ), - Tightening( 5, -1, Tightening::LB ), Tightening( 5, 1, Tightening::UB ), - - Tightening( 6, -2, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - Tightening( 7, -3, Tightening::LB ), Tightening( 7, 3, Tightening::UB ), - - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - - Tightening( 10, -2, Tightening::LB ), Tightening( 10, 2, Tightening::UB ), - Tightening( 11, -1, Tightening::LB ), Tightening( 11, 5, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( 
boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_preimage_approximation_sign2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-preimage-approx" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardSign2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), - Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), - Tightening( 13, -6, 
Tightening::LB ), Tightening( 13, 2, Tightening::UB ), - - Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), - Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), - Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), - - Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), - Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), - - Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), - Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), - - Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), - } ); - - List bounds, newBounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - 
tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - Tightening( 8, -1, Tightening::LB ), Tightening( 8, 1, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 1, Tightening::UB ), - Tightening( 10, -1, Tightening::LB ), Tightening( 10, 1, Tightening::UB ), - - Tightening( 11, -2, Tightening::LB ), Tightening( 11, 6, Tightening::UB ), - Tightening( 12, -4, Tightening::LB ), Tightening( 12, 4, Tightening::UB ), - Tightening( 13, -6, Tightening::LB ), Tightening( 13, 2, Tightening::UB 
), - - Tightening( 14, -1, Tightening::LB ), Tightening( 14, 1, Tightening::UB ), - Tightening( 15, -1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), - Tightening( 16, -1, Tightening::LB ), Tightening( 16, 1, Tightening::UB ), - - Tightening( 17, -3, Tightening::LB ), Tightening( 17, 3, Tightening::UB ), - Tightening( 18, -3, Tightening::LB ), Tightening( 18, 3, Tightening::UB ), - - Tightening( 19, -1, Tightening::LB ), Tightening( 19, 1, Tightening::UB ), - Tightening( 20, -1, Tightening::LB ), Tightening( 20, 1, Tightening::UB ), - - Tightening( 21, -3, Tightening::LB ), Tightening( 21, 1, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( {} ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void test_preimage_approximation_relu_and_bilinear() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-preimage-approx" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReluAndBilinear( nlr, tableau ); - - tableau.setLowerBound( 0, 0 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, 0 ); - tableau.setUpperBound( 1, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 2, 1, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB 
), Tightening( 4, 2, Tightening::UB ), - Tightening( 6, 0, Tightening::LB ), Tightening( 6, 1, Tightening::UB ), - - Tightening( 3, 1, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 2, Tightening::UB ), - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 1, Tightening::UB ), - - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 4, Tightening::UB ), - Tightening( 9, -1, Tightening::LB ), Tightening( 9, 2.2, Tightening::UB ), - - Tightening( 10, -4, Tightening::LB ), Tightening( 10, 8.8, Tightening::UB ), - - Tightening( 11, -8.8, Tightening::LB ), Tightening( 11, 4, Tightening::UB ), - } ); - - List bounds, newBounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 11, -2.4149, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - - double large = 1000000; - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - 
tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 2, -2, Tightening::LB ), Tightening( 2, 2, Tightening::UB ), - Tightening( 4, -12, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, 2, Tightening::UB ), - - Tightening( 3, 0, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 7, -1, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - - Tightening( 8, -2, Tightening::LB ), Tightening( 8, 8, Tightening::UB ), - Tightening( 9, -2, Tightening::LB ), Tightening( 9, 8, Tightening::UB ), - - Tightening( 10, -16, Tightening::LB ), Tightening( 10, 64, Tightening::UB ), - - Tightening( 11, -64, Tightening::LB ), Tightening( 11, 16, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 8, 7, Tightening::UB ), - Tightening( 9, 5.8235, Tightening::UB ), - - Tightening( 10, -14, Tightening::LB ), - - Tightening( 11, -24.8805, Tightening::LB ), - Tightening( 11, 14, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - void 
test_preimage_approximation_relu_and_bilinear2() - { - Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); - Options::get()->setString( Options::MILP_SOLVER_BOUND_TIGHTENING_TYPE, - "backward-preimage-approx" ); - - NLR::NetworkLevelReasoner nlr; - MockTableau tableau; - nlr.setTableau( &tableau ); - populateNetworkBackwardReluAndBilinear2( nlr, tableau ); - - tableau.setLowerBound( 0, -1 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 1 ); - tableau.setLowerBound( 2, -1 ); - tableau.setUpperBound( 2, 1 ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, -3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, -3, Tightening::LB ), Tightening( 5, 3, Tightening::UB ), - Tightening( 6, -6, Tightening::LB ), Tightening( 6, 6, Tightening::UB ), - - Tightening( 7, 0, Tightening::LB ), Tightening( 7, 2, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 3, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 3, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 6, Tightening::UB ), - - Tightening( 11, -4, Tightening::LB ), Tightening( 11, 8, Tightening::UB ), - Tightening( 12, -2, Tightening::LB ), Tightening( 12, 10, Tightening::UB ), - - Tightening( 13, -4, Tightening::LB ), Tightening( 13, 8, Tightening::UB ), - Tightening( 14, -2, Tightening::LB ), Tightening( 14, 10, Tightening::UB ), - - Tightening( 15, -40, Tightening::LB ), Tightening( 15, 80, Tightening::UB ), - } ); - - List bounds, newBounds; - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, 
bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds2( { - Tightening( 12, -1.75, Tightening::LB ), - - Tightening( 15, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds2 ) ); - - - // Change the current bounds - tableau.setLowerBound( 0, -3 ); - tableau.setUpperBound( 0, 1 ); - tableau.setLowerBound( 1, -1 ); - tableau.setUpperBound( 1, 2 ); - tableau.setLowerBound( 2, -2 ); - tableau.setUpperBound( 2, 2 ); - - double large = 1000000; - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - - - // Invoke DeepPoly - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); - - List expectedBounds3( { - Tightening( 3, -2, Tightening::LB ), Tightening( 3, 5, Tightening::UB ), - Tightening( 4, -5, Tightening::LB ), Tightening( 4, 5, Tightening::UB ), 
- Tightening( 5, -6, Tightening::LB ), Tightening( 5, 5, Tightening::UB ), - Tightening( 6, -15, Tightening::LB ), Tightening( 6, 7, Tightening::UB ), - - Tightening( 7, -2, Tightening::LB ), Tightening( 7, 5, Tightening::UB ), - Tightening( 8, 0, Tightening::LB ), Tightening( 8, 5, Tightening::UB ), - Tightening( 9, 0, Tightening::LB ), Tightening( 9, 5, Tightening::UB ), - Tightening( 10, 0, Tightening::LB ), Tightening( 10, 7, Tightening::UB ), - - Tightening( 11, -9, Tightening::LB ), Tightening( 11, 15.1818, Tightening::UB ), - Tightening( 12, -5, Tightening::LB ), Tightening( 12, 14.0909, Tightening::UB ), - - Tightening( 13, -9, Tightening::LB ), Tightening( 13, 15.1818, Tightening::UB ), - Tightening( 14, -5, Tightening::LB ), Tightening( 14, 14.0909, Tightening::UB ), - - Tightening( 15, -126.8182, Tightening::LB ), Tightening( 15, 213.9256, Tightening::UB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); - TS_ASSERT( boundsEqual( bounds, expectedBounds3 ) ); - - - // Invoke PreimageApproximation - TS_ASSERT_THROWS_NOTHING( updateTableau( tableau, bounds ) ); - TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); - TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); - - List expectedBounds4( { - Tightening( 11, -5, Tightening::LB ), - Tightening( 12, -4.6429, Tightening::LB ), - - Tightening( 15, 0, Tightening::LB ), - } ); - - TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); - bounds = removeRedundancies( bounds, newBounds ); - TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); - } - - bool boundsEqual( const List &bounds, const List &expectedBounds ) - { - if ( bounds.size() < expectedBounds.size() ) -======= - bool boundsEqual( const List &bounds, const List &expectedBounds ) - { - if ( bounds.size() != expectedBounds.size() ) ->>>>>>> upstream/master - return false; - - bool allFound = true; - for ( const auto &bound : bounds ) - { - bool currentFound = false; - for ( const auto 
&expectedBound : expectedBounds ) - { - currentFound |= - ( bound._type == expectedBound._type && - bound._variable == expectedBound._variable && - FloatUtils::areEqual( bound._value, expectedBound._value, 0.0001 ) ); - } - allFound &= currentFound; - } - return allFound; - } - -<<<<<<< HEAD - - // Create list of all tightenings in newBounds for which there is no bound in newBounds or in - // bounds which is at least as tight. - List removeRedundancies( const List &bounds, - const List &newBounds ) - { - List minimalBounds; - - for ( const auto &newBound : newBounds ) - { - bool foundTighter = false; - for ( const auto &bound : bounds ) - { - foundTighter |= - ( newBound._type == bound._type && newBound._variable == bound._variable && - ( ( newBound._type == Tightening::LB && - FloatUtils::lte( newBound._value, bound._value, 0.0001 ) ) || - ( newBound._type == Tightening::UB && - FloatUtils::gte( newBound._value, bound._value, 0.0001 ) ) ) ); - } - - for ( const auto &bound : newBounds ) - { - foundTighter |= - ( newBound._type == bound._type && newBound._variable == bound._variable && - ( ( newBound._type == Tightening::LB && - FloatUtils::lt( newBound._value, bound._value, 0.0001 ) ) || - ( newBound._type == Tightening::UB && - FloatUtils::gt( newBound._value, bound._value, 0.0001 ) ) ) ); - } - - if ( !foundTighter ) - minimalBounds.append( newBound ); - } - return minimalBounds; - } - - void updateTableau( MockTableau &tableau, List &tightenings ) - { -======= - void updateTableau( MockTableau &tableau, List &tightenings ) - { - ASSERT( tableau ); ->>>>>>> upstream/master - for ( const auto &tightening : tightenings ) - { - if ( tightening._type == Tightening::LB ) - { - tableau.setLowerBound( tightening._variable, tightening._value ); - } + void updateTableau( MockTableau &tableau, List &tightenings ) + { + for ( const auto &tightening : tightenings ) + { + if ( tightening._type == Tightening::LB ) + { + tableau.setLowerBound( tightening._variable, 
tightening._value ); + } if ( tightening._type == Tightening::UB ) { @@ -13527,68 +9976,4 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite } } } -<<<<<<< HEAD -======= - - void test_get_previous_bias() - { - NLR::NetworkLevelReasoner nlr; - populateNetwork( nlr ); - - // Generate query to create ReLU constraints - Query query; - nlr.generateQuery( query ); - - // Find ReLU constraints from the query - List constraints = query.getPiecewiseLinearConstraints(); - - for ( const auto &constraint : constraints ) - { - ReluConstraint *relu = dynamic_cast( constraint ); - TS_ASSERT( relu ); - - nlr.addConstraintInTopologicalOrder( relu ); - - // First ReLU layer (nodes 2,0 through 2,2) has previous bias 1 - if ( relu->getB() == 3 || relu->getB() == 5 || relu->getB() == 7 ) - { - TS_ASSERT_EQUALS( nlr.getPreviousBias( relu ), 1 ); - } - // Second ReLU layer (nodes 4,0 and 4,1) has previous bias 2 - else if ( relu->getB() == 9 || relu->getB() == 11 ) - { - TS_ASSERT_EQUALS( nlr.getPreviousBias( relu ), 2 ); - } - } - } - - void test_get_previous_bias_error_handling() - { - NLR::NetworkLevelReasoner nlr; - populateNetwork( nlr ); - - // Generate invalid ReLU constraint - ReluConstraint invalidRelu( 15, 16 ); // Variables not in network - - // Should throw since variables don't exist in network - TS_ASSERT_THROWS_EQUALS( nlr.getPreviousBias( &invalidRelu ), - const NLRError &e, - e.getCode(), - NLRError::RELU_NOT_FOUND ); - - // Test missing activation source using fresh network - NLR::NetworkLevelReasoner nlrNoActivations; - // Create minimal network without activation sources - nlrNoActivations.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlrNoActivations.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlrNoActivations.addLayer( 2, NLR::Layer::RELU, 2 ); - - ReluConstraint missingActivation( 2, 3 ); - - TS_ASSERT_THROWS_EQUALS( nlrNoActivations.getPreviousBias( &missingActivation ), - const NLRError &e, - e.getCode(), - NLRError::RELU_NOT_FOUND ); - } ->>>>>>> 
upstream/master }; From c74870e557dc85435b41add464cb9294cfe88569 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Wed, 26 Feb 2025 20:13:27 +0200 Subject: [PATCH 63/72] Vector &coeffs passed by refrence, removed PolygonalTightening. --- src/engine/PolygonalTightening.h | 101 ---------------------- src/nlr/LPFormulator.cpp | 29 +++---- src/nlr/LPFormulator.h | 24 ++--- src/nlr/Layer.cpp | 83 +++++++----------- src/nlr/Layer.h | 22 ++--- src/nlr/LayerOwner.h | 2 - src/nlr/NetworkLevelReasoner.cpp | 23 +---- src/nlr/NetworkLevelReasoner.h | 17 +--- src/nlr/tests/Test_LPRelaxation.h | 1 - src/nlr/tests/Test_NetworkLevelReasoner.h | 1 - 10 files changed, 72 insertions(+), 231 deletions(-) delete mode 100644 src/engine/PolygonalTightening.h diff --git a/src/engine/PolygonalTightening.h b/src/engine/PolygonalTightening.h deleted file mode 100644 index 99dabafa85..0000000000 --- a/src/engine/PolygonalTightening.h +++ /dev/null @@ -1,101 +0,0 @@ -/********************* */ -/*! \file PolygonalTightening.h - ** \verbatim - ** Top contributors (to current version): - ** Duligur Ibeling, Guy Katz, Ido Shmuel - ** This file is part of the Marabou project. - ** Copyright (c) 2017-2024 by the authors listed in the file AUTHORS - ** in the top-level source directory) and their institutional affiliations. - ** All rights reserved. See the file COPYING in the top-level source - ** directory for licensing information.\endverbatim - ** - ** [[ Add lengthier description here ]] - -**/ - -#ifndef __PolygonalTightening_h__ -#define __PolygonalTightening_h__ - -#include "Map.h" -#include "NeuronIndex.h" - -#include - -class PolygonalTightening -{ -public: - enum PolygonalBoundType { - LB = 0, - UB = 1, - }; - - PolygonalTightening( Map neuronToCoefficient, - double value, - PolygonalBoundType type ) - : _neuronToCoefficient( neuronToCoefficient ) - , _value( value ) - , _type( type ) - { - } - - /* - The coefficient of each neuron. - */ - Map _neuronToCoefficient; - - /* - Its new value. 
- */ - double _value; - - /* - Whether the tightening tightens the - lower bound or the upper bound. - */ - PolygonalBoundType _type; - - /* - Equality operator. - */ - bool operator==( const PolygonalTightening &other ) const - { - bool allFound = true; - for ( const auto &pair : _neuronToCoefficient ) - { - bool currentFound = false; - for ( const auto &otherPair : other._neuronToCoefficient ) - { - currentFound |= ( pair.first._layer == otherPair.first._layer && - pair.first._neuron == otherPair.first._neuron && - pair.second == otherPair.second ); - } - allFound &= currentFound; - } - bool result = allFound && _value == other._value && _type == other._type && - _neuronToCoefficient.size() == other._neuronToCoefficient.size(); - return result; - } - - void dump() const - { - printf( "PolygonalTightening: x %s %.2lf\n", _type == LB ? ">=" : "<=", _value ); - - for ( const auto &pair : _neuronToCoefficient ) - { - printf( "NeuronIndex: (layer %u, neuron %u), Coefficient: %.2lf )\n", - pair.first._layer, - pair.first._neuron, - pair.second ); - } - } -}; - -#endif // __PolygonalTightening_h__ - -// -// Local Variables: -// compile-command: "make -C ../.. 
" -// tags-file-name: "../../TAGS" -// c-basic-offset: 4 -// End: -// s diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp index c6a76ece4a..b4152769d2 100644 --- a/src/nlr/LPFormulator.cpp +++ b/src/nlr/LPFormulator.cpp @@ -250,7 +250,7 @@ void LPFormulator::optimizeBoundsWithIncrementalLpRelaxation( const Map &layers, bool backward, - std::vector coeffs ) + const Vector &coeffs ) { unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS ); @@ -339,7 +339,7 @@ void LPFormulator::optimizeBoundsWithLpRelaxation( const Map void LPFormulator::optimizeBoundsWithPreimageApproximation( Map &layers ) { - std::vector optimal_coeffs = layers[0]->OptimalParameterisedSymbolicBoundTightening(); + const Vector &optimal_coeffs = layers[0]->OptimalParameterisedSymbolicBoundTightening(); optimizeBoundsWithLpRelaxation( layers, false, optimal_coeffs ); optimizeBoundsWithLpRelaxation( layers, true, optimal_coeffs ); } @@ -419,7 +419,7 @@ void LPFormulator::optimizeBoundsOfOneLayerWithLpRelaxation( const Map coeffs ) + const Vector &coeffs ) { unsigned numberOfWorkers = Options::get()->getInt( Options::NUM_WORKERS ); @@ -657,7 +657,7 @@ void LPFormulator::tightenSingleVariableBoundsWithLPRelaxation( ThreadArgument & void LPFormulator::createLPRelaxation( const Map &layers, GurobiWrapper &gurobi, unsigned lastLayer, - std::vector coeffs ) + const Vector &coeffs ) { for ( const auto &layer : layers ) { @@ -669,9 +669,9 @@ void LPFormulator::createLPRelaxation( const Map &layers, addLayerToModel( gurobi, layer.second, false ); else { - Map> layerIndicesToParameters = + Map> layerIndicesToParameters = layer.second->getParametersForLayers( layers, coeffs ); - std::vector currentLayerCoeffs = layerIndicesToParameters[currentLayerIndex]; + const Vector ¤tLayerCoeffs = layerIndicesToParameters[currentLayerIndex]; addLayerToParameterisedModel( gurobi, layer.second, false, currentLayerCoeffs ); } } @@ -680,7 +680,7 @@ void LPFormulator::createLPRelaxation( const 
Map &layers, void LPFormulator::createLPRelaxationAfter( const Map &layers, GurobiWrapper &gurobi, unsigned firstLayer, - std::vector coeffs ) + const Vector &coeffs ) { unsigned depth = GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH; std::priority_queue, std::greater> layersToAdd; @@ -702,9 +702,9 @@ void LPFormulator::createLPRelaxationAfter( const Map &layers addLayerToModel( gurobi, currentLayer, true ); else { - Map> layerIndicesToParameters = + Map> layerIndicesToParameters = currentLayer->getParametersForLayers( layers, coeffs ); - std::vector currentLayerCoeffs = + const Vector ¤tLayerCoeffs = layerIndicesToParameters[currentLayerIndex]; addLayerToParameterisedModel( gurobi, currentLayer, true, currentLayerCoeffs ); } @@ -1006,7 +1006,6 @@ void LPFormulator::addAbsoluteValueLayerToLpRelaxation( GurobiWrapper &gurobi, gurobi.addVariable( Stringf( "x%u", targetVariable ), lb, ub ); // The phase of this AbsoluteValue is not yet fixed, 0 <= y <= max(-lb, ub). - // y >= 0 List terms; terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); @@ -1722,7 +1721,7 @@ void LPFormulator::addLeakyReluLayerToLpRelaxation( GurobiWrapper &gurobi, void LPFormulator::addLayerToParameterisedModel( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - std::vector coeffs ) + const Vector &coeffs ) { switch ( layer->getLayerType() ) { @@ -1751,7 +1750,7 @@ void LPFormulator::addLayerToParameterisedModel( GurobiWrapper &gurobi, void LPFormulator::addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - std::vector coeffs ) + const Vector &coeffs ) { double coeff = coeffs[0]; for ( unsigned i = 0; i < layer->getSize(); ++i ) @@ -1852,7 +1851,7 @@ void LPFormulator::addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurob void LPFormulator::addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - std::vector coeffs ) + const Vector &coeffs 
) { for ( unsigned i = 0; i < layer->getSize(); ++i ) { @@ -1939,7 +1938,7 @@ void LPFormulator::addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurob void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - std::vector coeffs ) + const Vector &coeffs ) { double slope = layer->getAlpha(); double coeff = coeffs[0]; @@ -2039,7 +2038,7 @@ void LPFormulator::addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper & void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - std::vector coeffs ) + const Vector &coeffs ) { for ( unsigned i = 0; i < layer->getSize(); ++i ) { diff --git a/src/nlr/LPFormulator.h b/src/nlr/LPFormulator.h index d89af4c331..ac9ecc4557 100644 --- a/src/nlr/LPFormulator.h +++ b/src/nlr/LPFormulator.h @@ -20,7 +20,6 @@ #include "LayerOwner.h" #include "Map.h" #include "ParallelSolver.h" -#include "PolygonalTightening.h" #include #include @@ -54,7 +53,7 @@ class LPFormulator : public ParallelSolver */ void optimizeBoundsWithLpRelaxation( const Map &layers, bool backward = false, - std::vector coeffs = {} ); + const Vector &coeffs = Vector( {} ) ); void optimizeBoundsWithPreimageApproximation( Map &layers ); void optimizeBoundsOfOneLayerWithLpRelaxation( const Map &layers, unsigned targetIndex ); @@ -76,11 +75,11 @@ class LPFormulator : public ParallelSolver void createLPRelaxation( const Map &layers, GurobiWrapper &gurobi, unsigned lastLayer = UINT_MAX, - std::vector coeffs = {} ); + const Vector &coeffs = Vector( {} ) ); void createLPRelaxationAfter( const Map &layers, GurobiWrapper &gurobi, unsigned firstLayer, - std::vector coeffs = {} ); + const Vector &coeffs = Vector( {} ) ); double solveLPRelaxation( GurobiWrapper &gurobi, const Map &layers, MinOrMax minOrMax, @@ -132,36 +131,37 @@ class LPFormulator : public ParallelSolver const Layer *layer, bool createVariables ); - void 
optimizeBoundsOfNeuronsWithLpRelaxation( ThreadArgument &args, - bool backward, - std::vector coeffs = {} ); + void + optimizeBoundsOfNeuronsWithLpRelaxation( ThreadArgument &args, + bool backward, + const Vector &coeffs = Vector( {} ) ); // Create LP relaxations depending on external parameters. void addLayerToParameterisedModel( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - std::vector coeffs ); + const Vector &coeffs ); void addReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - std::vector coeffs ); + const Vector &coeffs ); void addLeakyReluLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - std::vector coeffs ); + const Vector &coeffs ); void addSignLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - std::vector coeffs ); + const Vector &coeffs ); void addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &gurobi, const Layer *layer, bool createVariables, - std::vector coeffs ); + const Vector &coeffs ); /* diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index 2341c07596..46db84ae8a 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -3394,9 +3394,7 @@ void Layer::computeSymbolicBoundsForWeightedSum() } } -void Layer::computeParameterisedSymbolicBounds( std::vector coeffs, - bool receivePolygonal, - bool receive ) +void Layer::computeParameterisedSymbolicBounds( const Vector &coeffs, bool receive ) { switch ( _type ) { @@ -3420,30 +3418,9 @@ void Layer::computeParameterisedSymbolicBounds( std::vector coeffs, computeSymbolicBounds(); break; } - - if ( receivePolygonal ) - { - for ( unsigned i = 0; i < _size; ++i ) - { - Map lower_polygonal_tightening; - Map upper_polygonal_tightening; - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - const NeuronIndex neuron( 0, j ); - lower_polygonal_tightening.insert( neuron, _symbolicLb[j * _size + i] ); - 
upper_polygonal_tightening.insert( neuron, _symbolicUb[j * _size + i] ); - } - - _layerOwner->receivePolygonalTighterBound( PolygonalTightening( - lower_polygonal_tightening, _symbolicLowerBias[i], PolygonalTightening::LB ) ); - _layerOwner->receivePolygonalTighterBound( PolygonalTightening( - upper_polygonal_tightening, _symbolicUpperBias[i], PolygonalTightening::UB ) ); - } - } } -void Layer::computeParameterisedSymbolicBoundsForRelu( std::vector coeffs, bool receive ) +void Layer::computeParameterisedSymbolicBoundsForRelu( const Vector &coeffs, bool receive ) { ASSERT( coeffs.size() == 1 ); @@ -3623,7 +3600,7 @@ void Layer::computeParameterisedSymbolicBoundsForRelu( std::vector coeff } } -void Layer::computeParameterisedSymbolicBoundsForSign( std::vector coeffs, bool receive ) +void Layer::computeParameterisedSymbolicBoundsForSign( const Vector &coeffs, bool receive ) { ASSERT( coeffs.size() == 2 ); ASSERT( coeffs[0] >= 0 && coeffs[0] <= 1 ); @@ -3840,7 +3817,7 @@ void Layer::computeParameterisedSymbolicBoundsForSign( std::vector coeff } } -void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( std::vector coeffs, +void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( const Vector &coeffs, bool receive ) { ASSERT( _alpha > 0 && _alpha < 1 ); @@ -4084,7 +4061,7 @@ void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( std::vector } } -void Layer::computeParameterisedSymbolicBoundsForBilinear( std::vector coeffs, +void Layer::computeParameterisedSymbolicBoundsForBilinear( const Vector &coeffs, bool receive ) { ASSERT( coeffs.size() == 2 ); @@ -4322,7 +4299,7 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( std::vector c } } -std::vector Layer::OptimalParameterisedSymbolicBoundTightening() +const Vector Layer::OptimalParameterisedSymbolicBoundTightening() { const Map &layers = _layerOwner->getLayerIndexToLayer(); @@ -4333,17 +4310,19 @@ std::vector Layer::OptimalParameterisedSymbolicBoundTightening() double epsilon = 
GlobalConfiguration::DEFAULT_EPSILON_FOR_COMPARISONS; double weight_decay = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_WEIGHT_DECAY; double lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; + unsigned dimension = getNumberOfParameters( layers ); bool maximize = false; - unsigned dimension = getNumberOfParameters( layers ); - std::vector lower_bounds; - std::vector upper_bounds; - std::vector guess; - lower_bounds.resize( dimension, 0 ); - upper_bounds.resize( dimension, 1 ); - guess.resize( dimension, 0 ); + Vector lower_bounds( dimension ); + Vector upper_bounds( dimension ); + for ( size_t j = 0; j < dimension; ++j ) + { + lower_bounds[j] = 0; + upper_bounds[j] = 1; + } // Initialize initial guess uniformly. + Vector guess( dimension ); std::mt19937_64 rng( GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED ); std::uniform_real_distribution dis( 0, 1 ); for ( size_t j = 0; j < dimension; ++j ) @@ -4353,16 +4332,15 @@ std::vector Layer::OptimalParameterisedSymbolicBoundTightening() guess[j] = lb + dis( rng ) * ( ub - lb ); } - std::vector> candidates( dimension ); - std::vector gradient( dimension ); - std::vector current_minimum = guess; + Vector> candidates( dimension ); + Vector gradient( dimension ); for ( size_t i = 0; i < max_iterations; ++i ) { double current_cost = EstimateVolume( guess ); for ( size_t j = 0; j < dimension; ++j ) { - candidates[j] = guess; + candidates[j] = Vector( guess ); candidates[j][j] += step_size; if ( candidates[j][j] > upper_bounds[j] || candidates[j][j] < lower_bounds[j] ) @@ -4405,19 +4383,20 @@ std::vector Layer::OptimalParameterisedSymbolicBoundTightening() } } - return guess; + const Vector optimal_coeffs( guess ); + return optimal_coeffs; } -double Layer::EstimateVolume( std::vector coeffs ) +double Layer::EstimateVolume( const Vector &coeffs ) { // First, run parameterised symbolic bound propagation. 
const Map &layers = _layerOwner->getLayerIndexToLayer(); - Map> layerIndicesToParameters = + Map> layerIndicesToParameters = getParametersForLayers( layers, coeffs ); for ( unsigned i = 0; i < layers.size(); ++i ) { ASSERT( layers.exists( i ) ); - std::vector currentLayerCoeffs = layerIndicesToParameters[i]; + const Vector ¤tLayerCoeffs = layerIndicesToParameters[i]; layers[i]->computeParameterisedSymbolicBounds( currentLayerCoeffs ); } @@ -4521,23 +4500,21 @@ unsigned Layer::getNumberOfParameters( const Map &layers ) return index; } -Map> -Layer::getParametersForLayers( const Map &layers, std::vector coeffs ) +Map> Layer::getParametersForLayers( const Map &layers, + const Vector &coeffs ) { unsigned index = 0; - Map> layerIndicesToParameters; + Map> layerIndicesToParameters; for ( auto pair : layers ) { unsigned layerIndex = pair.first; Layer *layer = layers[layerIndex]; - std::vector parameters = {}; switch ( layer->getLayerType() ) { case Layer::RELU: case Layer::LEAKY_RELU: { - parameters = std::vector( coeffs.begin() + index, coeffs.begin() + index + 1 ); - layerIndicesToParameters.insert( layerIndex, parameters ); + layerIndicesToParameters.insert( layerIndex, Vector( { coeffs[index] } ) ); index++; } break; @@ -4545,14 +4522,14 @@ Layer::getParametersForLayers( const Map &layers, std::vector case Layer::SIGN: case Layer::BILINEAR: { - parameters = std::vector( coeffs.begin() + index, coeffs.begin() + index + 2 ); - layerIndicesToParameters.insert( layerIndex, parameters ); + layerIndicesToParameters.insert( + layerIndex, Vector( { coeffs[index], coeffs[index + 1] } ) ); index += 2; } break; default: - layerIndicesToParameters.insert( layerIndex, parameters ); + layerIndicesToParameters.insert( layerIndex, Vector( {} ) ); break; } } diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h index c61751ad74..a9899964c2 100644 --- a/src/nlr/Layer.h +++ b/src/nlr/Layer.h @@ -138,20 +138,18 @@ class Layer void obtainCurrentBounds(); void computeIntervalArithmeticBounds(); 
void computeSymbolicBounds(); - void computeParameterisedSymbolicBounds( std::vector coeffs, - bool receivePolygonal = false, - bool receive = false ); + void computeParameterisedSymbolicBounds( const Vector &coeffs, bool receive = false ); // Get total number of optimizable parameters for parameterised SBT relaxation. unsigned getNumberOfParameters( const Map &layers ); // Get map containing vector of optimizable parameters for parameterised SBT relaxation for // every layer index. - Map> getParametersForLayers( const Map &layers, - std::vector coeffs ); + Map> getParametersForLayers( const Map &layers, + const Vector &coeffs ); // Return optimizable parameters which minimize parameterised SBT bounds' volume. - std::vector OptimalParameterisedSymbolicBoundTightening(); + const Vector OptimalParameterisedSymbolicBoundTightening(); /* Preprocessing functionality: variable elimination and reindexing @@ -310,13 +308,15 @@ class Layer /* Helper functions for parameterised symbolic bound tightening */ - void computeParameterisedSymbolicBoundsForRelu( std::vector coeffs, bool receive ); - void computeParameterisedSymbolicBoundsForSign( std::vector coeffs, bool receive ); - void computeParameterisedSymbolicBoundsForLeakyRelu( std::vector coeffs, bool receive ); - void computeParameterisedSymbolicBoundsForBilinear( std::vector coeffs, bool receive ); + void computeParameterisedSymbolicBoundsForRelu( const Vector &coeffs, bool receive ); + void computeParameterisedSymbolicBoundsForSign( const Vector &coeffs, bool receive ); + void computeParameterisedSymbolicBoundsForLeakyRelu( const Vector &coeffs, + bool receive ); + void computeParameterisedSymbolicBoundsForBilinear( const Vector &coeffs, + bool receive ); // Estimate Volume of parameterised symbolic bound tightening. 
- double EstimateVolume( std::vector coeffs ); + double EstimateVolume( const Vector &coeffs ); // Return difference between given point and upper and lower bounds determined by parameterised // SBT relaxation. diff --git a/src/nlr/LayerOwner.h b/src/nlr/LayerOwner.h index b8ae03cb0f..180a4fdebd 100644 --- a/src/nlr/LayerOwner.h +++ b/src/nlr/LayerOwner.h @@ -17,7 +17,6 @@ #define __LayerOwner_h__ #include "ITableau.h" -#include "PolygonalTightening.h" #include "Tightening.h" namespace NLR { @@ -36,7 +35,6 @@ class LayerOwner virtual const ITableau *getTableau() const = 0; virtual unsigned getNumberOfLayers() const = 0; virtual void receiveTighterBound( Tightening tightening ) = 0; - virtual void receivePolygonalTighterBound( PolygonalTightening polygonal_tightening ) = 0; }; } // namespace NLR diff --git a/src/nlr/NetworkLevelReasoner.cpp b/src/nlr/NetworkLevelReasoner.cpp index f80784f01c..ed07a1d2d5 100644 --- a/src/nlr/NetworkLevelReasoner.cpp +++ b/src/nlr/NetworkLevelReasoner.cpp @@ -198,36 +198,19 @@ void NetworkLevelReasoner::clearConstraintTightenings() _boundTightenings.clear(); } -void NetworkLevelReasoner::receivePolygonalTighterBound( PolygonalTightening polygonal_tightening ) -{ - _polygonalBoundTightenings.append( polygonal_tightening ); -} - -void NetworkLevelReasoner::getConstraintPolygonalTightenings( - List &polygonal_tightenings ) -{ - polygonal_tightenings = _polygonalBoundTightenings; - _polygonalBoundTightenings.clear(); -} - -void NetworkLevelReasoner::clearConstraintPolygonalTightenings() -{ - _polygonalBoundTightenings.clear(); -} - void NetworkLevelReasoner::symbolicBoundPropagation() { for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) _layerIndexToLayer[i]->computeSymbolicBounds(); } -void NetworkLevelReasoner::parameterisedSymbolicBoundPropagation( std::vector coeffs ) +void NetworkLevelReasoner::parameterisedSymbolicBoundPropagation( const Vector &coeffs ) { - Map> layerIndicesToParameters = + Map> layerIndicesToParameters = 
_layerIndexToLayer[0]->getParametersForLayers( _layerIndexToLayer, coeffs ); for ( unsigned i = 0; i < _layerIndexToLayer.size(); ++i ) { - std::vector currentLayerCoeffs = layerIndicesToParameters[i]; + const Vector ¤tLayerCoeffs = layerIndicesToParameters[i]; _layerIndexToLayer[i]->computeParameterisedSymbolicBounds( currentLayerCoeffs ); } } diff --git a/src/nlr/NetworkLevelReasoner.h b/src/nlr/NetworkLevelReasoner.h index a09b4a4712..03389789c4 100644 --- a/src/nlr/NetworkLevelReasoner.h +++ b/src/nlr/NetworkLevelReasoner.h @@ -24,7 +24,6 @@ #include "MatrixMultiplication.h" #include "NeuronIndex.h" #include "PiecewiseLinearFunctionType.h" -#include "PolygonalTightening.h" #include "Tightening.h" #include "Vector.h" @@ -121,13 +120,6 @@ class NetworkLevelReasoner : public LayerOwner - getConstraintTightenings: this is the function that an external user calls in order to collect the tighter bounds discovered by the NLR. - - - receiveTighterPolygonalBound: this is a callback from the layer - objects, through which they report tighter polygonal bounds. - - - getConstraintPolygonalTightenings: this is the function that an - external user calls in order to collect the tighter polygonal bounds - discovered by the NLR. 
*/ void setTableau( const ITableau *tableau ); @@ -137,7 +129,7 @@ class NetworkLevelReasoner : public LayerOwner void obtainCurrentBounds(); void intervalArithmeticBoundPropagation(); void symbolicBoundPropagation(); - void parameterisedSymbolicBoundPropagation( std::vector coeffs ); + void parameterisedSymbolicBoundPropagation( const Vector &coeffs ); void deepPolyPropagation(); void lpRelaxationPropagation(); void LPTighteningForOneLayer( unsigned targetIndex ); @@ -149,10 +141,6 @@ class NetworkLevelReasoner : public LayerOwner void getConstraintTightenings( List &tightenings ); void clearConstraintTightenings(); - void receivePolygonalTighterBound( PolygonalTightening polygonal_tightening ); - void getConstraintPolygonalTightenings( List &polygonal_tightenings ); - void clearConstraintPolygonalTightenings(); - /* For debugging purposes: dump the network topology */ @@ -223,9 +211,8 @@ class NetworkLevelReasoner : public LayerOwner Map _layerIndexToLayer; const ITableau *_tableau; - // Tightenings and Polyognal Tightenings discovered by the various layers + // Tightenings discovered by the various layers List _boundTightenings; - List _polygonalBoundTightenings; std::unique_ptr _deepPolyAnalysis; diff --git a/src/nlr/tests/Test_LPRelaxation.h b/src/nlr/tests/Test_LPRelaxation.h index 9b58650b2d..654d5c1af0 100644 --- a/src/nlr/tests/Test_LPRelaxation.h +++ b/src/nlr/tests/Test_LPRelaxation.h @@ -19,7 +19,6 @@ #include "Layer.h" #include "NetworkLevelReasoner.h" #include "Options.h" -#include "PolygonalTightening.h" #include "Query.h" #include "Tightening.h" #include "Vector.h" diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index 7c46ae4317..f194bc3ed6 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -19,7 +19,6 @@ #include "Layer.h" #include "NetworkLevelReasoner.h" #include "Options.h" -#include "PolygonalTightening.h" #include "Query.h" #include 
"Tightening.h" #include "Vector.h" From eb0e58f5ccd299fb05d8f9759e5f122596282836 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 27 Feb 2025 16:35:39 +0200 Subject: [PATCH 64/72] . --- src/nlr/tests/Test_NetworkLevelReasoner.h | 1 + 1 file changed, 1 insertion(+) diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index e0a0a90015..e804755ef3 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -8530,6 +8530,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT( boundsEqual( bounds, expectedBounds ) ); } + void test_sbt_abs_positive_and_negative() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); From 2df9d589d39e9c9d6eb05cc4edd958667e727882 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 27 Feb 2025 16:45:09 +0200 Subject: [PATCH 65/72] . --- src/nlr/tests/Test_LPRelaxation.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/nlr/tests/Test_LPRelaxation.h b/src/nlr/tests/Test_LPRelaxation.h index 703c6c54a2..654d5c1af0 100644 --- a/src/nlr/tests/Test_LPRelaxation.h +++ b/src/nlr/tests/Test_LPRelaxation.h @@ -4486,7 +4486,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT( boundsEqual( bounds, expectedBounds4 ) ); } - + void test_preimage_approximation_relu() { Options::get()->setString( Options::SYMBOLIC_BOUND_TIGHTENING_TYPE, "sbt" ); @@ -5736,7 +5736,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite } return allFound; } - + // Create list of all tightenings in newBounds for which there is no bound in newBounds or in // bounds which is at least as tight. 
List removeRedundancies( const List &bounds, @@ -5772,7 +5772,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite } return minimalBounds; } - + void updateTableau( MockTableau &tableau, List &tightenings ) { for ( const auto &tightening : tightenings ) From eafa24c933f46c8123c58667fdabc03fe8793ef3 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 13 Mar 2025 14:37:31 +0200 Subject: [PATCH 66/72] Corrections to PreimageApproximation, parameterizes SBT. --- src/nlr/Layer.cpp | 137 +++++++++++++----------------- src/nlr/Layer.h | 8 +- src/nlr/tests/Test_LPRelaxation.h | 106 +++++++++++++++-------- 3 files changed, 136 insertions(+), 115 deletions(-) diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index 46db84ae8a..ecfaddb7d0 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -3580,22 +3580,21 @@ void Layer::computeParameterisedSymbolicBoundsForRelu( const Vector &coe variable. If they are tigheter than what was previously known, store them. */ - if ( _lb[i] < _symbolicLbOfLb[i] ) + if ( receive ) { - _lb[i] = _symbolicLbOfLb[i]; - - if ( receive ) + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - - if ( _ub[i] > _symbolicUbOfUb[i] ) - { - _ub[i] = _symbolicUbOfUb[i]; + } - if ( receive ) + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } } } } @@ -3797,22 +3796,21 @@ void Layer::computeParameterisedSymbolicBoundsForSign( const Vector &coe variable. If they are tigheter than what was previously known, store them. 
*/ - if ( _lb[i] < _symbolicLbOfLb[i] ) + if ( receive ) { - _lb[i] = _symbolicLbOfLb[i]; - - if ( receive ) + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - - if ( _ub[i] > _symbolicUbOfUb[i] ) - { - _ub[i] = _symbolicUbOfUb[i]; + } - if ( receive ) + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } } } } @@ -4041,22 +4039,21 @@ void Layer::computeParameterisedSymbolicBoundsForLeakyRelu( const Vector variable. If they are tigheter than what was previously known, store them. */ - if ( _lb[i] < _symbolicLbOfLb[i] ) + if ( receive ) { - _lb[i] = _symbolicLbOfLb[i]; - - if ( receive ) + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - - if ( _ub[i] > _symbolicUbOfUb[i] ) - { - _ub[i] = _symbolicUbOfUb[i]; + } - if ( receive ) + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } } } } @@ -4279,22 +4276,21 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( const Vector variable. If they are tigheter than what was previously known, store them. 
*/ - if ( _lb[i] < _symbolicLbOfLb[i] ) + if ( receive ) { - _lb[i] = _symbolicLbOfLb[i]; - - if ( receive ) + if ( _lb[i] < _symbolicLbOfLb[i] ) + { + _lb[i] = _symbolicLbOfLb[i]; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _lb[i], Tightening::LB ) ); - } - - if ( _ub[i] > _symbolicUbOfUb[i] ) - { - _ub[i] = _symbolicUbOfUb[i]; + } - if ( receive ) + if ( _ub[i] > _symbolicUbOfUb[i] ) + { + _ub[i] = _symbolicUbOfUb[i]; _layerOwner->receiveTighterBound( Tightening( _neuronToVariable[i], _ub[i], Tightening::UB ) ); + } } } } @@ -4419,7 +4415,7 @@ double Layer::EstimateVolume( const Vector &coeffs ) double ub = inputLayer->getUb( index ); if ( lb == ub ) - return 0; + continue; log_box_volume += std::log( ub - lb ); } @@ -4474,34 +4470,32 @@ double Layer::calculateDifferenceFromSymbolic( Map &point, uns return std::max( _ub[i] - upperSum, lowerSum - _lb[i] ); } -unsigned Layer::getNumberOfParameters( const Map &layers ) + +unsigned Layer::getNumberOfParametersPerType( Type t ) const +{ + if ( t == Layer::RELU || t == Layer::LEAKY_RELU ) + return 1; + + if ( t == Layer::SIGN || t == Layer::BILINEAR ) + return 2; + + return 0; +} + +unsigned Layer::getNumberOfParameters( const Map &layers ) const { unsigned index = 0; for ( auto pair : layers ) { unsigned layerIndex = pair.first; Layer *layer = layers[layerIndex]; - switch ( layer->getLayerType() ) - { - case Layer::RELU: - case Layer::LEAKY_RELU: - index++; - break; - - case Layer::SIGN: - case Layer::BILINEAR: - index += 2; - break; - - default: - break; - } + index += getNumberOfParametersPerType( layer->getLayerType() ); } return index; } Map> Layer::getParametersForLayers( const Map &layers, - const Vector &coeffs ) + const Vector &coeffs ) const { unsigned index = 0; Map> layerIndicesToParameters; @@ -4509,29 +4503,14 @@ Map> Layer::getParametersForLayers( const MapgetLayerType() ) - { - case Layer::RELU: - case Layer::LEAKY_RELU: + unsigned n_coeffs = getNumberOfParametersPerType( 
layer->getLayerType() ); + Vector current_coeffs( n_coeffs ); + for ( unsigned i = 0; i < n_coeffs; i++ ) { - layerIndicesToParameters.insert( layerIndex, Vector( { coeffs[index] } ) ); - index++; - } - break; - - case Layer::SIGN: - case Layer::BILINEAR: - { - layerIndicesToParameters.insert( - layerIndex, Vector( { coeffs[index], coeffs[index + 1] } ) ); - index += 2; - } - break; - - default: - layerIndicesToParameters.insert( layerIndex, Vector( {} ) ); - break; + current_coeffs[i] = coeffs[index + i]; } + layerIndicesToParameters.insert( layerIndex, current_coeffs ); + index += n_coeffs; } return layerIndicesToParameters; } diff --git a/src/nlr/Layer.h b/src/nlr/Layer.h index a9899964c2..889a42fdab 100644 --- a/src/nlr/Layer.h +++ b/src/nlr/Layer.h @@ -140,13 +140,17 @@ class Layer void computeSymbolicBounds(); void computeParameterisedSymbolicBounds( const Vector &coeffs, bool receive = false ); + + // Get number of optimizable parameters for parameterised SBT relaxation per layer type. + unsigned getNumberOfParametersPerType( Type t ) const; + // Get total number of optimizable parameters for parameterised SBT relaxation. - unsigned getNumberOfParameters( const Map &layers ); + unsigned getNumberOfParameters( const Map &layers ) const; // Get map containing vector of optimizable parameters for parameterised SBT relaxation for // every layer index. Map> getParametersForLayers( const Map &layers, - const Vector &coeffs ); + const Vector &coeffs ) const; // Return optimizable parameters which minimize parameterised SBT bounds' volume. 
const Vector OptimalParameterisedSymbolicBoundTightening(); diff --git a/src/nlr/tests/Test_LPRelaxation.h b/src/nlr/tests/Test_LPRelaxation.h index 654d5c1af0..56350ba655 100644 --- a/src/nlr/tests/Test_LPRelaxation.h +++ b/src/nlr/tests/Test_LPRelaxation.h @@ -4536,6 +4536,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds2( { + Tightening( 8, 0, Tightening::LB ), + Tightening( 10, 0, Tightening::UB ), Tightening( 11, 1.625, Tightening::LB ), } ); @@ -4605,6 +4607,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds4( { + Tightening( 8, 0, Tightening::LB ), + Tightening( 9, 0, Tightening::LB ), + Tightening( 11, 0.8472, Tightening::LB ), } ); @@ -4679,9 +4684,17 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 13, -4.25, Tightening::LB ), Tightening( 13, 3.25, Tightening::UB ), + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + Tightening( 16, 3.25, Tightening::UB ), + Tightening( 17, -11.1417, Tightening::LB ), Tightening( 17, 10, Tightening::UB ), + Tightening( 19, 0, Tightening::LB ), + Tightening( 19, 10, Tightening::UB ), + Tightening( 21, -17.3084, Tightening::LB ), Tightening( 21, 3.2160, Tightening::UB ), } ); @@ -4782,15 +4795,25 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + Tightening( 11, -5, Tightening::LB ), Tightening( 12, -4.6429, Tightening::LB ), Tightening( 13, 8.5519, Tightening::UB ), + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 16, 0, Tightening::LB ), + Tightening( 16, 8.5519, Tightening::UB ), + Tightening( 17, -23.6231, Tightening::LB ), Tightening( 17, 14.0909, 
Tightening::UB ), Tightening( 18, 2, Tightening::LB ), Tightening( 18, 28.2015, Tightening::UB ), + Tightening( 20, 2, Tightening::LB ), + Tightening( 20, 28.2015, Tightening::UB ), + Tightening( 21, -29.2015, Tightening::LB ), Tightening( 21, 6.5734, Tightening::UB ), } ); @@ -4849,13 +4872,17 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds2( { + Tightening( 4, -0.1, Tightening::LB ), + Tightening( 7, -2, Tightening::LB ), + Tightening( 8, -0.045, Tightening::LB ), + Tightening( 9, -0.2, Tightening::LB ), + Tightening( 10, -1.8, Tightening::LB ), Tightening( 10, 0, Tightening::UB ), Tightening( 11, 1.4542, Tightening::LB ), - Tightening( 11, 1.4542, Tightening::LB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); @@ -4923,13 +4950,18 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds4( { + Tightening( 4, -0.5, Tightening::LB ), + Tightening( 5, -0.4, Tightening::LB ), + Tightening( 6, -2, Tightening::LB ), Tightening( 6, 3.1, Tightening::UB ), Tightening( 7, -3.2, Tightening::LB ), Tightening( 7, 4, Tightening::UB ), + Tightening( 8, -0.2, Tightening::LB ), Tightening( 8, 3.1, Tightening::UB ), Tightening( 9, -0.32, Tightening::LB ), + Tightening( 9, 4, Tightening::UB ), Tightening( 10, -3.8726, Tightening::LB ), Tightening( 10, 0.03, Tightening::UB ), @@ -5004,28 +5036,24 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds2( { - Tightening( 11, -4.5, Tightening::LB ), - Tightening( 11, 8.5, Tightening::UB ), - Tightening( 12, -2.225, Tightening::LB ), - Tightening( 13, 2.975, Tightening::UB ), + Tightening( 7, -0.2, Tightening::LB ), Tightening( 8, -0.3, Tightening::LB ), + Tightening( 9, -0.3, Tightening::LB ), Tightening( 10, -0.6, Tightening::LB ), + 
+ Tightening( 11, -4.5, Tightening::LB ), Tightening( 11, 8.5, Tightening::UB ), + Tightening( 12, -2.225, Tightening::LB ), Tightening( 13, 2.975, Tightening::UB ), Tightening( 13, -4.175, Tightening::LB ), - Tightening( 15, -0.2225, Tightening::LB ), - Tightening( 16, 2.975, Tightening::UB ), + Tightening( 14, -0.45, Tightening::LB ), Tightening( 14, 8.5, Tightening::UB ), + Tightening( 15, -0.2225, Tightening::LB ), Tightening( 16, 2.975, Tightening::UB ), Tightening( 16, -0.4175, Tightening::LB ), - Tightening( 17, -11.452, Tightening::LB ), - Tightening( 17, 10.18, Tightening::UB ), - Tightening( 18, 0.87, Tightening::LB ), - Tightening( 18, 16.0688, Tightening::UB ), + Tightening( 17, -11.452, Tightening::LB ), Tightening( 17, 10.18, Tightening::UB ), + Tightening( 18, 0.87, Tightening::LB ), Tightening( 18, 16.0688, Tightening::UB ), - Tightening( 19, -1.1452, Tightening::LB ), - Tightening( 19, 10.18, Tightening::UB ), - Tightening( 20, 0.87, Tightening::LB ), - Tightening( 20, 16.0688, Tightening::UB ), + Tightening( 19, -1.1452, Tightening::LB ), Tightening( 19, 10.18, Tightening::UB ), + Tightening( 20, 0.87, Tightening::LB ), Tightening( 20, 16.0688, Tightening::UB ), - Tightening( 21, -17.0684, Tightening::LB ), - Tightening( 21, 3.6767, Tightening::UB ), + Tightening( 21, -17.0684, Tightening::LB ), Tightening( 21, 3.6767, Tightening::UB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); @@ -5124,28 +5152,24 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds4( { - Tightening( 11, -5.6, Tightening::LB ), - Tightening( 11, 16.4636, Tightening::UB ), - Tightening( 12, -6.0286, Tightening::LB ), - Tightening( 13, -5.9, Tightening::LB ), + Tightening( 7, -0.2, Tightening::LB ), Tightening( 8, -0.5, Tightening::LB ), + Tightening( 9, -0.6, Tightening::LB ), Tightening( 10, -1.5, Tightening::LB ), + + Tightening( 11, -5.6, Tightening::LB 
), Tightening( 11, 16.4636, Tightening::UB ), + Tightening( 12, -6.0286, Tightening::LB ), Tightening( 13, -5.9, Tightening::LB ), Tightening( 13, 8.0468, Tightening::UB ), - Tightening( 15, -0.6029, Tightening::LB ), - Tightening( 16, -0.59, Tightening::LB ), + Tightening( 14, -0.56, Tightening::LB ), Tightening( 14, 16.4636, Tightening::UB ), + Tightening( 15, -0.6029, Tightening::LB ), Tightening( 16, -0.59, Tightening::LB ), Tightening( 16, 8.0468, Tightening::UB ), - Tightening( 17, -24.8864, Tightening::LB ), - Tightening( 17, 14.3076, Tightening::UB ), - Tightening( 18, 0.75, Tightening::LB ), - Tightening( 18, 28.0272, Tightening::UB ), + Tightening( 17, -24.8864, Tightening::LB ), Tightening( 17, 14.3076, Tightening::UB ), + Tightening( 18, 0.75, Tightening::LB ), Tightening( 18, 28.0272, Tightening::UB ), - Tightening( 19, -2.4886, Tightening::LB ), - Tightening( 19, 14.3076, Tightening::UB ), - Tightening( 20, 0.75, Tightening::LB ), - Tightening( 20, 28.0272, Tightening::UB ), + Tightening( 19, -2.4886, Tightening::LB ), Tightening( 19, 14.3076, Tightening::UB ), + Tightening( 20, 0.75, Tightening::LB ), Tightening( 20, 28.0272, Tightening::UB ), - Tightening( 21, -29.9648, Tightening::LB ), - Tightening( 21, 6.9619, Tightening::UB ), + Tightening( 21, -29.9648, Tightening::LB ), Tightening( 21, 6.9619, Tightening::UB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); @@ -5488,6 +5512,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds2( { + Tightening( 10, 8.7411, Tightening::UB ), Tightening( 11, -2.4149, Tightening::LB ), } ); @@ -5556,10 +5581,13 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + Tightening( 8, 7, Tightening::UB ), Tightening( 9, 5.8235, Tightening::UB ), Tightening( 10, 
-14, Tightening::LB ), + Tightening( 10, 40.7647, Tightening::UB ), Tightening( 11, -24.8805, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), @@ -5626,7 +5654,11 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite List expectedBounds2( { Tightening( 12, -1.75, Tightening::LB ), - Tightening( 15, 0, Tightening::LB ), + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + + Tightening( 15, -0.0001, Tightening::LB ), + Tightening( 15, 78.8787, Tightening::UB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); @@ -5705,10 +5737,16 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds4( { + Tightening( 7, 0, Tightening::LB ), + Tightening( 11, -5, Tightening::LB ), Tightening( 12, -4.6429, Tightening::LB ), + Tightening( 13, 0, Tightening::LB ), + Tightening( 14, 0, Tightening::LB ), + Tightening( 15, 0, Tightening::LB ), + Tightening( 15, 211.0082, Tightening::UB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); From b1711aa0d660bac2d88bd5b01a9949949f1c8f16 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 27 Mar 2025 01:06:28 +0200 Subject: [PATCH 67/72] Remove redundant setAlpha in ReLU NLR unit tests. 
--- src/nlr/tests/Test_LPRelaxation.h | 3 --- src/nlr/tests/Test_NetworkLevelReasoner.h | 3 --- 2 files changed, 6 deletions(-) diff --git a/src/nlr/tests/Test_LPRelaxation.h b/src/nlr/tests/Test_LPRelaxation.h index 56350ba655..49dbd06135 100644 --- a/src/nlr/tests/Test_LPRelaxation.h +++ b/src/nlr/tests/Test_LPRelaxation.h @@ -73,9 +73,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addLayer( 4, NLR::Layer::RELU, 2 ); nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.getLayer( 2 )->setAlpha( 0.1 ); - nlr.getLayer( 4 )->setAlpha( 0.1 ); - // Mark layer dependencies for ( unsigned i = 1; i <= 5; ++i ) nlr.addLayerDependency( i - 1, i ); diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index e804755ef3..eb01e0b851 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -1959,9 +1959,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite nlr.addLayer( 4, NLR::Layer::RELU, 2 ); nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.getLayer( 2 )->setAlpha( 0.1 ); - nlr.getLayer( 4 )->setAlpha( 0.1 ); - // Mark layer dependencies for ( unsigned i = 1; i <= 5; ++i ) nlr.addLayerDependency( i - 1, i ); From f48b0506b6b027f2d8bdb117388ddf4a6e2069ec Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Wed, 17 Sep 2025 21:03:10 +0300 Subject: [PATCH 68/72] Correcting SBT, LP for Sigmoid, Bilinear, Softmax. SBT for Sigmoid: Corrected criterion for fixed Sigmoid neuron. SBT for Softmax: Symbolic bounds now stored correctly in _symbolicLb, _symbolicUb. SBT for Bilinear: Fixed calculation for symbolic lower/upper bias, fixed mistaken assumption both source neurons are from same layer. Parameterized SBT for Bilinear: Fixed calculation for symbolic lower/upper bias, fixed mistaken assumption both source neurons are from same layer. LP relaxation for Bilinear: Fixed mistaken assumption both source neurons are from same layer. 
Parameterized LP relaxation for Bilinear: Fixed mistaken assumption both source neurons are from same layer. Minor changes for code of PreimageApproximation optimization function: size_t -> unsigned, define sign variable at start of function, use Vector( double, unsigned ) constructor and std::min/std::max. --- src/nlr/LPFormulator.cpp | 30 ++--- src/nlr/Layer.cpp | 247 +++++++++++++++++++++------------------ 2 files changed, 147 insertions(+), 130 deletions(-) diff --git a/src/nlr/LPFormulator.cpp b/src/nlr/LPFormulator.cpp index b4152769d2..84447306f9 100644 --- a/src/nlr/LPFormulator.cpp +++ b/src/nlr/LPFormulator.cpp @@ -1,5 +1,5 @@ /********************* */ -/*! \file NetworkLevelReasoner.cpp +/*! \file LPFormulator.cpp ** \verbatim ** Top contributors (to current version): ** Guy Katz @@ -1483,20 +1483,21 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, List sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - Vector sourceLbs; Vector sourceUbs; Vector sourceValues; Vector sourceNeurons; + Vector sourceLayers; bool allConstant = true; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) ); + sourceLayers.append( sourceLayer ); sourceNeurons.append( sourceNeuron ); sourceLbs.append( sourceLb ); sourceUbs.append( sourceUb ); @@ -1545,10 +1546,10 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -sourceLbs[1], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + Stringf( "x%u", 
sourceLayers[0]->neuronToVariable( sourceNeurons[0] ) ) ) ); terms.append( GurobiWrapper::Term( -sourceLbs[0], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + Stringf( "x%u", sourceLayers[1]->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addGeqConstraint( terms, -sourceLbs[0] * sourceLbs[1] ); // Upper bound: out <= u_y * x + l_x * y - l_x * u_y @@ -1556,10 +1557,10 @@ void LPFormulator::addBilinearLayerToLpRelaxation( GurobiWrapper &gurobi, terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -sourceUbs[1], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + Stringf( "x%u", sourceLayers[0]->neuronToVariable( sourceNeurons[0] ) ) ) ); terms.append( GurobiWrapper::Term( -sourceLbs[0], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + Stringf( "x%u", sourceLayers[1]->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addLeqConstraint( terms, -sourceLbs[0] * sourceUbs[1] ); } } @@ -2048,20 +2049,21 @@ void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &g List sources = layer->getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - Vector sourceLbs; Vector sourceUbs; Vector sourceValues; Vector sourceNeurons; + Vector sourceLayers; bool allConstant = true; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); String sourceName = Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeuron ) ); + sourceLayers.append( sourceLayer ); sourceNeurons.append( sourceNeuron ); sourceLbs.append( sourceLb ); sourceUbs.append( sourceUb ); @@ -2113,17 +2115,17 @@ void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &g // Upper 
bound: out <= a_u * x + b_u * y + c_u, where // a_u = alpha2 * u_y + ( 1 - alpha2 ) * l_y // b_u = alpha2 * l_x + ( 1 - alpha2 ) * u_x - // c_u = -alpha2 * -l_x * u_y - ( 1 - alpha2 ) * u_x * l_y + // c_u = -alpha2 * l_x * u_y - ( 1 - alpha2 ) * u_x * l_y List terms; terms.clear(); terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -coeffs[0] * sourceLbs[1] - ( 1 - coeffs[0] ) * sourceUbs[1], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + Stringf( "x%u", sourceLayers[0]->neuronToVariable( sourceNeurons[0] ) ) ) ); terms.append( GurobiWrapper::Term( -coeffs[0] * sourceLbs[0] - ( 1 - coeffs[0] ) * sourceUbs[0], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + Stringf( "x%u", sourceLayers[1]->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addGeqConstraint( terms, -coeffs[0] * sourceLbs[0] * sourceLbs[1] - ( 1 - coeffs[0] ) * sourceUbs[0] * sourceUbs[1] ); @@ -2132,10 +2134,10 @@ void LPFormulator::addBilinearLayerToParameterisedLpRelaxation( GurobiWrapper &g terms.append( GurobiWrapper::Term( 1, Stringf( "x%u", targetVariable ) ) ); terms.append( GurobiWrapper::Term( -coeffs[1] * sourceUbs[1] - ( 1 - coeffs[1] ) * sourceLbs[1], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[0] ) ) ) ); + Stringf( "x%u", sourceLayers[0]->neuronToVariable( sourceNeurons[0] ) ) ) ); terms.append( GurobiWrapper::Term( -coeffs[1] * sourceLbs[0] - ( 1 - coeffs[1] ) * sourceUbs[0], - Stringf( "x%u", sourceLayer->neuronToVariable( sourceNeurons[1] ) ) ) ); + Stringf( "x%u", sourceLayers[1]->neuronToVariable( sourceNeurons[1] ) ) ) ); gurobi.addLeqConstraint( terms, -coeffs[1] * sourceLbs[0] * sourceUbs[1] - ( 1 - coeffs[1] ) * sourceUbs[0] * sourceLbs[1] ); diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index ecfaddb7d0..d4ae7af8a2 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -2267,7 +2267,7 @@ void Layer::computeSymbolicBoundsForSigmoid() 
double sourceLbSigmoid = SigmoidConstraint::sigmoid( sourceLb ); // Case when the Sigmoid constraint is fixed - if ( FloatUtils::areEqual( FloatUtils::round( sourceUb ), FloatUtils::round( sourceLb ) ) ) + if ( FloatUtils::areEqual( sourceLb, sourceUb ) ) { for ( unsigned j = 0; j < _inputLayerSize; ++j ) { @@ -2704,7 +2704,6 @@ void Layer::computeSymbolicBoundsForSoftmax() Vector sourceMids; Vector targetLbs; Vector targetUbs; - unsigned len = 0; for ( const auto &sourceIndex : sources ) { unsigned sourceNeuron = sourceIndex._neuron; @@ -2716,8 +2715,6 @@ void Layer::computeSymbolicBoundsForSoftmax() sourceMids.append( ( sourceLb + sourceUb ) / 2 ); targetLbs.append( _lb[i] ); targetUbs.append( _ub[i] ); - - ++len; } // Find the index of i in the softmax @@ -2756,8 +2753,8 @@ void Layer::computeSymbolicBoundsForSoftmax() _symbolicUpperBias[i] = _ub[i]; for ( const auto &sourceIndex : sources ) { - symbolicLb[len * sourceIndex._neuron + i] = 0; - symbolicUb[len * sourceIndex._neuron + i] = 0; + symbolicLb[_size * sourceIndex._neuron + i] = 0; + symbolicUb[_size * sourceIndex._neuron + i] = 0; } } else @@ -2780,7 +2777,7 @@ void Layer::computeSymbolicBoundsForSoftmax() { double dldj = softmaxdLSELowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - symbolicLb[len * sourceIndex._neuron + i] = dldj; + symbolicLb[_size * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; } @@ -2793,7 +2790,7 @@ void Layer::computeSymbolicBoundsForSoftmax() { double dldj = softmaxdLSELowerBound2( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - symbolicLb[len * sourceIndex._neuron + i] = dldj; + symbolicLb[_size * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; } @@ -2806,7 +2803,7 @@ void Layer::computeSymbolicBoundsForSoftmax() { double dudj = softmaxdLSEUpperbound( sourceMids, targetLbs, targetUbs, index, inputIndex ); - symbolicUb[len * 
sourceIndex._neuron + i] = dudj; + symbolicUb[_size * sourceIndex._neuron + i] = dudj; _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; ++inputIndex; } @@ -2820,7 +2817,7 @@ void Layer::computeSymbolicBoundsForSoftmax() { double dldj = softmaxdERLowerBound( sourceMids, sourceLbs, sourceUbs, index, inputIndex ); - symbolicLb[len * sourceIndex._neuron + i] = dldj; + symbolicLb[_size * sourceIndex._neuron + i] = dldj; _symbolicLowerBias[i] -= dldj * sourceMids[inputIndex]; ++inputIndex; } @@ -2832,7 +2829,7 @@ void Layer::computeSymbolicBoundsForSoftmax() { double dudj = softmaxdERUpperBound( sourceMids, targetLbs, targetUbs, index, inputIndex ); - symbolicUb[len * sourceIndex._neuron + i] = dudj; + symbolicUb[_size * sourceIndex._neuron + i] = dudj; _symbolicUpperBias[i] -= dudj * sourceMids[inputIndex]; ++inputIndex; } @@ -3034,27 +3031,26 @@ void Layer::computeSymbolicBoundsForBilinear() List sources = getActivationSources( i ); ASSERT( sources.size() == 2 ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - - unsigned sourceLayerSize = sourceLayer->getSize(); - const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); - const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); - Vector sourceLbs; Vector sourceUbs; Vector sourceValues; + Vector sourceNeurons; + Vector sourceLayerSizes; + Vector sourceLayers; bool allConstant = true; - unsigned indexA = 0; - unsigned indexB = 0; - unsigned counter = 0; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); + unsigned sourceLayerSize = sourceLayer->getSize(); + sourceLayers.append( sourceLayer ); + sourceNeurons.append( sourceNeuron ); sourceLbs.append( sourceLb ); sourceUbs.append( sourceUb ); + sourceLayerSizes.append( sourceLayerSize ); if ( 
!sourceLayer->neuronEliminated( sourceNeuron ) ) { @@ -3065,16 +3061,6 @@ void Layer::computeSymbolicBoundsForBilinear() double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); sourceValues.append( sourceValue ); } - - if ( counter == 0 ) - { - indexA = sourceIndex._neuron; - } - else - { - indexB = sourceIndex._neuron; - } - ++counter; } if ( allConstant ) @@ -3104,47 +3090,75 @@ void Layer::computeSymbolicBoundsForBilinear() // Symbolic upper bound: // out <= alpha * x + beta * y + gamma // where alpha = ub_y, beta = lb_x, gamma = -lb_x * ub_y + double aLower = sourceLbs[1]; + double aUpper = sourceUbs[1]; + double bLower = sourceLbs[0]; + double bUpper = sourceLbs[0]; + _symbolicLowerBias[i] = -sourceLbs[0] * sourceLbs[1]; + _symbolicUpperBias[i] = -sourceLbs[0] * sourceUbs[1]; + for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - if ( sourceLbs[1] >= 0 ) + if ( aLower >= 0 ) { _symbolicLb[j * _size + i] += - sourceLbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA]; + aLower * ( sourceLayers[0] + ->getSymbolicLb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicLowerBias[i] += aLower * ( sourceLayers[0]->getSymbolicLowerBias() )[0]; } else { _symbolicLb[j * _size + i] += - sourceLbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA]; + aLower * ( sourceLayers[0] + ->getSymbolicUb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicLowerBias[i] += aLower * ( sourceLayers[0]->getSymbolicUpperBias() )[0]; } - if ( sourceUbs[1] >= 0 ) + if ( aUpper >= 0 ) { _symbolicUb[j * _size + i] += - sourceUbs[1] * sourceSymbolicUb[j * sourceLayerSize + indexA]; + aUpper * ( sourceLayers[0] + ->getSymbolicUb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicUpperBias[i] += aUpper * ( sourceLayers[0]->getSymbolicUpperBias() )[0]; } else { _symbolicUb[j * _size + i] += - sourceUbs[1] * sourceSymbolicLb[j * sourceLayerSize + indexA]; + aUpper * ( sourceLayers[0] + ->getSymbolicLb() )[j * sourceLayerSizes[0] + 
sourceNeurons[0]]; + _symbolicUpperBias[i] += aUpper * ( sourceLayers[0]->getSymbolicLowerBias() )[0]; } - if ( sourceLbs[0] >= 0 ) + if ( bLower >= 0 ) { _symbolicLb[j * _size + i] += - sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB]; - _symbolicUb[j * _size + i] += - sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB]; + bLower * ( sourceLayers[1] + ->getSymbolicLb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bLower * ( sourceLayers[1]->getSymbolicLowerBias() )[1]; } else { _symbolicLb[j * _size + i] += - sourceLbs[0] * sourceSymbolicUb[j * sourceLayerSize + indexB]; + bLower * ( sourceLayers[1] + ->getSymbolicUb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bLower * ( sourceLayers[1]->getSymbolicUpperBias() )[1]; + } + + if ( bUpper >= 0 ) + { _symbolicUb[j * _size + i] += - sourceLbs[0] * sourceSymbolicLb[j * sourceLayerSize + indexB]; + bUpper * ( sourceLayers[1] + ->getSymbolicUb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bUpper * ( sourceLayers[1]->getSymbolicUpperBias() )[1]; + } + else + { + _symbolicUb[j * _size + i] += + bUpper * ( sourceLayers[1] + ->getSymbolicLb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bUpper * ( sourceLayers[1]->getSymbolicLowerBias() )[1]; } } - _symbolicLowerBias[i] = -sourceLbs[0] * sourceLbs[1]; - _symbolicUpperBias[i] = -sourceLbs[0] * sourceUbs[1]; double lb = FloatUtils::infinity(); double ub = FloatUtils::negativeInfinity(); @@ -4091,27 +4105,26 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( const Vector List sources = getActivationSources( i ); ASSERT( sources.size() == 2 ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - - unsigned sourceLayerSize = sourceLayer->getSize(); - const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); - const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); - Vector sourceLbs; Vector 
sourceUbs; Vector sourceValues; + Vector sourceNeurons; + Vector sourceLayerSizes; + Vector sourceLayers; bool allConstant = true; - unsigned indexA = 0; - unsigned indexB = 0; - unsigned counter = 0; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); + unsigned sourceLayerSize = sourceLayer->getSize(); + sourceLayers.append( sourceLayer ); + sourceNeurons.append( sourceNeuron ); sourceLbs.append( sourceLb ); sourceUbs.append( sourceUb ); + sourceLayerSizes.append( sourceLayerSize ); if ( !sourceLayer->neuronEliminated( sourceNeuron ) ) { @@ -4122,16 +4135,6 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( const Vector double sourceValue = sourceLayer->getEliminatedNeuronValue( sourceNeuron ); sourceValues.append( sourceValue ); } - - if ( counter == 0 ) - { - indexA = sourceIndex._neuron; - } - else - { - indexB = sourceIndex._neuron; - } - ++counter; } if ( allConstant ) @@ -4155,65 +4158,89 @@ void Layer::computeParameterisedSymbolicBoundsForBilinear( const Vector } // Billinear linear relaxation (arXiv:2405.21063v2 [cs.LG]) - // Lower bound: out >= a_l * x + b_l * y + c_l, where - // a_l = alpha1 * l_y + ( 1 - alpha1 ) * u_y - // b_l = alpha1 * l_x + ( 1 - alpha1 ) * u_x - // c_l = -alpha1 * l_x * l_y - ( 1 - alpha1 ) * u_x * u_y + // Lower bound: out >= aLower * x + bLower * y + c_l, where + // aLower = alpha1 * l_y + ( 1 - alpha1 ) * u_y + // bLower = alpha1 * l_x + ( 1 - alpha1 ) * u_x + // cLower = -alpha1 * l_x * l_y - ( 1 - alpha1 ) * u_x * u_y - // Upper bound: out <= a_u * x + b_u * y + c_u, where - // a_u = alpha2 * u_y + ( 1 - alpha2 ) * l_y - // b_u = alpha2 * l_x + ( 1 - alpha2 ) * u_x - // c_u = -alpha2 * -l_x * u_y - ( 1 - alpha2 ) * u_x * l_y + // Upper bound: out <= aUpper * x + bUpper * y + c_u, where + // aUpper = 
alpha2 * u_y + ( 1 - alpha2 ) * l_y + // bUpper = alpha2 * l_x + ( 1 - alpha2 ) * u_x + // cUpper = -alpha2 * l_x * u_y - ( 1 - alpha2 ) * u_x * l_y - double a_l = coeffs[0] * sourceLbs[1] + ( 1 - coeffs[0] ) * sourceUbs[1]; - double a_u = coeffs[1] * sourceUbs[1] + ( 1 - coeffs[1] ) * sourceLbs[1]; - double b_l = coeffs[0] * sourceLbs[0] + ( 1 - coeffs[0] ) * sourceUbs[0]; - double b_u = coeffs[1] * sourceLbs[0] + ( 1 - coeffs[1] ) * sourceUbs[0]; + double aLower = coeffs[0] * sourceLbs[1] + ( 1 - coeffs[0] ) * sourceUbs[1]; + double aUpper = coeffs[1] * sourceUbs[1] + ( 1 - coeffs[1] ) * sourceLbs[1]; + double bLower = coeffs[0] * sourceLbs[0] + ( 1 - coeffs[0] ) * sourceUbs[0]; + double bUpper = coeffs[1] * sourceLbs[0] + ( 1 - coeffs[1] ) * sourceUbs[0]; + + _symbolicLowerBias[i] = -coeffs[0] * sourceLbs[0] * sourceLbs[1] - + ( 1 - coeffs[0] ) * sourceUbs[0] * sourceUbs[1]; + _symbolicUpperBias[i] = -coeffs[1] * sourceLbs[0] * sourceUbs[1] - + ( 1 - coeffs[1] ) * sourceUbs[0] * sourceLbs[1]; for ( unsigned j = 0; j < _inputLayerSize; ++j ) { - if ( a_l >= 0 ) + if ( aLower >= 0 ) { - _symbolicLb[j * _size + i] += a_l * sourceSymbolicLb[j * sourceLayerSize + indexA]; + _symbolicLb[j * _size + i] += + aLower * ( sourceLayers[0] + ->getSymbolicLb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicLowerBias[i] += aLower * ( sourceLayers[0]->getSymbolicLowerBias() )[0]; } else { - _symbolicLb[j * _size + i] += a_l * sourceSymbolicUb[j * sourceLayerSize + indexA]; + _symbolicLb[j * _size + i] += + aLower * ( sourceLayers[0] + ->getSymbolicUb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicLowerBias[i] += aLower * ( sourceLayers[0]->getSymbolicUpperBias() )[0]; } - if ( a_u >= 0 ) + if ( aUpper >= 0 ) { - _symbolicUb[j * _size + i] += a_u * sourceSymbolicUb[j * sourceLayerSize + indexA]; + _symbolicUb[j * _size + i] += + aUpper * ( sourceLayers[0] + ->getSymbolicUb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicUpperBias[i] += aUpper 
* ( sourceLayers[0]->getSymbolicUpperBias() )[0]; } else { - _symbolicUb[j * _size + i] += a_u * sourceSymbolicLb[j * sourceLayerSize + indexA]; + _symbolicUb[j * _size + i] += + aUpper * ( sourceLayers[0] + ->getSymbolicLb() )[j * sourceLayerSizes[0] + sourceNeurons[0]]; + _symbolicUpperBias[i] += aUpper * ( sourceLayers[0]->getSymbolicLowerBias() )[0]; } - if ( b_l >= 0 ) + if ( bLower >= 0 ) { - _symbolicLb[j * _size + i] += b_l * sourceSymbolicLb[j * sourceLayerSize + indexB]; + _symbolicLb[j * _size + i] += + bLower * ( sourceLayers[1] + ->getSymbolicLb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bLower * ( sourceLayers[1]->getSymbolicLowerBias() )[1]; } else { - _symbolicLb[j * _size + i] += b_l * sourceSymbolicUb[j * sourceLayerSize + indexB]; + _symbolicLb[j * _size + i] += + bLower * ( sourceLayers[1] + ->getSymbolicUb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bLower * ( sourceLayers[1]->getSymbolicUpperBias() )[1]; } - if ( b_l >= 0 ) + if ( bUpper >= 0 ) { - _symbolicUb[j * _size + i] += b_u * sourceSymbolicUb[j * sourceLayerSize + indexB]; + _symbolicUb[j * _size + i] += + bUpper * ( sourceLayers[1] + ->getSymbolicUb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bUpper * ( sourceLayers[1]->getSymbolicUpperBias() )[1]; } else { - _symbolicUb[j * _size + i] += b_u * sourceSymbolicLb[j * sourceLayerSize + indexB]; + _symbolicUb[j * _size + i] += + bUpper * ( sourceLayers[1] + ->getSymbolicLb() )[j * sourceLayerSizes[1] + sourceNeurons[1]]; + _symbolicUpperBias[i] += bUpper * ( sourceLayers[1]->getSymbolicLowerBias() )[1]; } } - _symbolicLowerBias[i] = -coeffs[0] * sourceLbs[0] * sourceLbs[1] - - ( 1 - coeffs[0] ) * sourceUbs[0] * sourceUbs[1]; - _symbolicUpperBias[i] = -coeffs[1] * sourceLbs[0] * sourceUbs[1] - - ( 1 - coeffs[1] ) * sourceUbs[0] * sourceLbs[1]; - double lb = FloatUtils::infinity(); double ub = FloatUtils::negativeInfinity(); List values = { 
sourceLbs[0] * sourceLbs[1], @@ -4308,20 +4335,16 @@ const Vector Layer::OptimalParameterisedSymbolicBoundTightening() double lr = GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_LEARNING_RATE; unsigned dimension = getNumberOfParameters( layers ); bool maximize = false; + double sign = ( maximize ? 1 : -1 ); - Vector lower_bounds( dimension ); - Vector upper_bounds( dimension ); - for ( size_t j = 0; j < dimension; ++j ) - { - lower_bounds[j] = 0; - upper_bounds[j] = 1; - } + Vector lower_bounds( dimension, 0 ); + Vector upper_bounds( dimension, 1 ); // Initialize initial guess uniformly. Vector guess( dimension ); std::mt19937_64 rng( GlobalConfiguration::PREIMAGE_APPROXIMATION_OPTIMIZATION_RANDOM_SEED ); std::uniform_real_distribution dis( 0, 1 ); - for ( size_t j = 0; j < dimension; ++j ) + for ( unsigned j = 0; j < dimension; ++j ) { double lb = lower_bounds[j]; double ub = upper_bounds[j]; @@ -4331,10 +4354,10 @@ const Vector Layer::OptimalParameterisedSymbolicBoundTightening() Vector> candidates( dimension ); Vector gradient( dimension ); - for ( size_t i = 0; i < max_iterations; ++i ) + for ( unsigned i = 0; i < max_iterations; ++i ) { double current_cost = EstimateVolume( guess ); - for ( size_t j = 0; j < dimension; ++j ) + for ( unsigned j = 0; j < dimension; ++j ) { candidates[j] = Vector( guess ); candidates[j][j] += step_size; @@ -4345,13 +4368,12 @@ const Vector Layer::OptimalParameterisedSymbolicBoundTightening() continue; } - size_t sign = ( maximize == false ? 
1 : -1 ); double cost = EstimateVolume( candidates[j] ); - gradient[j] = sign * ( cost - current_cost ) / step_size + weight_decay * guess[j]; + gradient[j] = ( cost - current_cost ) / step_size + weight_decay * guess[j]; } bool gradient_is_zero = true; - for ( size_t j = 0; j < dimension; ++j ) + for ( unsigned j = 0; j < dimension; ++j ) { if ( FloatUtils::abs( gradient[j] ) > epsilon ) { @@ -4363,19 +4385,12 @@ const Vector Layer::OptimalParameterisedSymbolicBoundTightening() break; } - for ( size_t j = 0; j < dimension; ++j ) + for ( unsigned j = 0; j < dimension; ++j ) { - guess[j] -= lr * gradient[j]; - - if ( guess[j] > upper_bounds[j] ) - { - guess[j] = upper_bounds[j]; - } + guess[j] += sign * lr * gradient[j]; - if ( guess[j] < lower_bounds[j] ) - { - guess[j] = lower_bounds[j]; - } + guess[j] = std::min( guess[j], upper_bounds[j] ); + guess[j] = std::max( guess[j], lower_bounds[j] ); } } From 96dd1e96de202b421cb823d332c0466a80cc6e88 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Wed, 17 Sep 2025 21:04:11 +0300 Subject: [PATCH 69/72] Corrected Test_NLR.h, Test_LPRelaxation.h. Fixed typos in documentation of some tests, Corrected one SBT Softmax test and two PreimageApproximation Bilinear tests, removed all void populateNetworkBackward*() functions from Test_NetworkLevelReasoner.h (all unused, duplicated from Test_LPRelaxation,h). 
--- src/nlr/tests/Test_LPRelaxation.h | 10 +- src/nlr/tests/Test_NetworkLevelReasoner.h | 2177 +-------------------- 2 files changed, 16 insertions(+), 2171 deletions(-) diff --git a/src/nlr/tests/Test_LPRelaxation.h b/src/nlr/tests/Test_LPRelaxation.h index 49dbd06135..a746f98607 100644 --- a/src/nlr/tests/Test_LPRelaxation.h +++ b/src/nlr/tests/Test_LPRelaxation.h @@ -3975,7 +3975,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite tableau.setUpperBound( 1, 1 ); - // Invoke SBT + // Invoke DeepPoly TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); TS_ASSERT_THROWS_NOTHING( nlr.deepPolyPropagation() ); @@ -5509,8 +5509,8 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.lpRelaxationPropagation() ); List expectedBounds2( { - Tightening( 10, 8.7411, Tightening::UB ), - Tightening( 11, -2.4149, Tightening::LB ), + Tightening( 10, 8.361, Tightening::UB ), + Tightening( 11, -8.361, Tightening::LB ), } ); TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( newBounds ) ); @@ -5579,14 +5579,14 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite List expectedBounds4( { Tightening( 7, 0, Tightening::LB ), - Tightening( 8, 7, Tightening::UB ), + Tightening( 9, 5.8235, Tightening::UB ), Tightening( 10, -14, Tightening::LB ), Tightening( 10, 40.7647, Tightening::UB ), - Tightening( 11, -24.8805, Tightening::LB ), + Tightening( 11, -40.7647, Tightening::LB ), Tightening( 11, 14, Tightening::UB ), } ); diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index eb01e0b851..4a0343d6b1 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -1641,7 +1641,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x6 x7 x8 = softmax(x3, x4, x5) x9 = x6 + x7 + x8 - x10 = x6 + x7 + x8 + x10 = - x6 - x7 - x8 */ @@ -1932,2161 +1932,6 @@ class NetworkLevelReasonerTestSuite : public 
CxxTest::TestSuite tableau.setUpperBound( 5, large ); } - void populateNetworkBackwardReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - 1 R -1 R -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ R / \ R / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 2 of - https://dl.acm.org/doi/10.1145/3563325 - using ReLU activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( 
NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardReLU2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = ReLU( x3 ) - x8 = ReLU( x4 ) - x9 = ReLU( x5 ) - x10 = ReLU( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = ReLU( x11 ) - x15 = ReLU( x12 ) - x16 = ReLU( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = ReLU( x17 ) - x20 = ReLU( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::RELU, 4 ); - nlr.addLayer( 3, 
NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::RELU, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the ReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - 
nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, 
-large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardSigmoid( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - 1 S -1 S -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ S / \ S / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 
2 of - https://dl.acm.org/doi/10.1145/3563325 - using Sigmoid activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the Sigmoid sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 
1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardSigmoid2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = Sigmoid( x3 ) - x8 = Sigmoid( x4 ) - x9 = Sigmoid( x5 ) - x10 = Sigmoid( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = Sigmoid( x11 ) - x15 = Sigmoid( x12 ) - x16 = Sigmoid( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = Sigmoid( x17 ) - x20 = Sigmoid( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::SIGMOID, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::SIGMOID, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::SIGMOID, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and 
biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the Sigmoid sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - 
nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - 
tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardSign( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - 1 Sign -1 Sign -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ Sign / \ Sign / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 2 of - https://dl.acm.org/doi/10.1145/3563325 - using Sign activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - 
nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardSign2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = Sign( x3 ) - x8 = Sign( x4 ) - x9 = SIgn( x5 ) - x10 = Sign( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = Sign( x11 ) - x15 = Sign( x12 ) - x16 = Sign( x13 ) 
- - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = Sign( x17 ) - x20 = Sign( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::SIGN, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::SIGN, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::SIGN, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the Sign sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - 
nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - 
tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardRound( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 Rnd -1 Rnd -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ Rnd / \ Rnd / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 
2 of - https://dl.acm.org/doi/10.1145/3563325 - using Round activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ROUND, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ROUND, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the Round sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - 
- tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardRound2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = Round( x3 ) - x8 = Round( x4 ) - x9 = Round( x5 ) - x10 = Round( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = Round( x11 ) - x15 = Round( x12 ) - x16 = Round( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = Round( x17 ) - x20 = Round( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::ROUND, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::ROUND, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::ROUND, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - 
nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the Round sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 
9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, 
-large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardAbs( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 A -1 A -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ A / \ A / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 2 of - https://dl.acm.org/doi/10.1145/3563325 - using Absolute value activation instead of LeakyReLU - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); 
- - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardAbs2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = Abs( x3 ) - x8 = Abs( x4 ) - x9 = Abs( x5 ) - x10 = Abs( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = Abs( x11 ) - x15 = Abs( x12 ) - x16 = Abs( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = 
Abs( x17 ) - x20 = Abs( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::ABSOLUTE_VALUE, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::ABSOLUTE_VALUE, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::ABSOLUTE_VALUE, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the Abs sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - 
nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - 
tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardLeakyReLU( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - - 1 LR -1 LR -1 2 - x0 --- x2 ---> x4 --- x6 ---> x8 --- x10 - \ / \ / \ / - 1 \ / 2 \ / 1 \ / - \/ \/ \/ - /\ /\ /\ - -1 / \ 1 / \ -1 / \ - / \ LR / \ LR / \ - x1 --- x3 ---> x5 --- x7 ---> x9 --- x11 - 1 -1 2 - - The example described in Fig. 
2 of - https://dl.acm.org/doi/10.1145/3563325 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - - nlr.getLayer( 2 )->setAlpha( 0.1 ); - nlr.getLayer( 4 )->setAlpha( 0.1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 1, 1, 0, -1 ); - nlr.setWeight( 0, 1, 1, 1, 1 ); - - nlr.setWeight( 2, 0, 3, 0, -1 ); - nlr.setWeight( 2, 0, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, -1 ); - nlr.setWeight( 4, 1, 5, 1, 2 ); - - nlr.setBias( 5, 1, 2 ); - - // Mark the LeakyReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 3 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 6 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 10 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 11 ); - - // Very loose bounds for 
neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardLeakyRelu2( NLR::NetworkLevelReasoner &nlr, MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 x17 x19 - x1 x12 x15 x21 - x5 x9 x18 x20 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = LeakyReLU( x3 ) - x8 = LeakyReLU( x4 ) - x9 = LeakyReLU( x5 ) - x10 = LeakyReLU( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14 = LeakyReLU( x11 ) - x15 = LeakyReLU( x12 ) - x16 = LeakyReLU( x13 ) - - x17 = -x14 + x15 - x16 - x18 = x14 + x15 + x16 - - x19 = LeakyReLU( x17 ) - x20 = LeakyReLU( x18 ) - - x21 = x19 - x20 - 1 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::LEAKY_RELU, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::LEAKY_RELU, 3 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 6, NLR::Layer::LEAKY_RELU, 2 ); - nlr.addLayer( 7, NLR::Layer::WEIGHTED_SUM, 1 ); - - nlr.getLayer( 2 )->setAlpha( 0.1 ); - nlr.getLayer( 4 )->setAlpha( 
0.1 ); - nlr.getLayer( 6 )->setAlpha( 0.1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 7; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - nlr.setWeight( 4, 0, 5, 1, 1 ); - nlr.setWeight( 4, 1, 5, 0, 1 ); - nlr.setWeight( 4, 1, 5, 1, 1 ); - nlr.setWeight( 4, 2, 5, 0, -1 ); - nlr.setWeight( 4, 2, 5, 1, 1 ); - - nlr.setWeight( 6, 0, 7, 0, 1 ); - nlr.setWeight( 6, 1, 7, 0, -1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - nlr.setBias( 7, 0, -1 ); - - // Mark the LeakyReLU sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 5, 0, 6, 0 ); - nlr.addActivationSource( 5, 1, 6, 1 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); 
- nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 1 ), 18 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 0 ), 19 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 6, 1 ), 20 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 7, 0 ), 21 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 22 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - 
tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - tableau.setLowerBound( 18, -large ); - tableau.setUpperBound( 18, large ); - tableau.setLowerBound( 19, -large ); - tableau.setUpperBound( 19, large ); - tableau.setLowerBound( 20, -large ); - tableau.setUpperBound( 20, large ); - tableau.setLowerBound( 21, -large ); - tableau.setUpperBound( 21, large ); - } - - void populateNetworkBackwardSoftmaxAndMax( NLR::NetworkLevelReasoner &nlr, - MockTableau &tableau ) - { - /* - a a' - x e - b b' f h - y d - c c' - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::MAX, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the Softmax/Max sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - 
nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - - void populateNetworkBackwardSoftmaxAndMax2( NLR::NetworkLevelReasoner &nlr, - MockTableau &tableau ) - { - /* - x3 x7 - x0 x11 x14 - x4 x8 - x1 x12 x15 x17 - x5 x9 - x2 x13 x16 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + x2 - x6 = 3*x0 - 2*x1 - x2 - - x7, x8, x9, x10 = Softmax( x2, x3, x4, x5 ) - - x11 
= 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - x13 = x7 - x8 + 2x9 - 2 - - x14, x15, x16 = Softmax( x11, x12, x13 ) - - x17 = Max( x14, x15, x16 ) - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::SOFTMAX, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 4, NLR::Layer::SOFTMAX, 3 ); - nlr.addLayer( 5, NLR::Layer::MAX, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 0, 3, 2, 1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 1, 3, 2, -1 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 2, 3, 2, 2 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setBias( 3, 0, 2 ); - nlr.setBias( 3, 2, -2 ); - - // Mark the Softmax/Max sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 0 ); - nlr.addActivationSource( 1, 2, 2, 0 ); - nlr.addActivationSource( 1, 3, 2, 0 ); - nlr.addActivationSource( 1, 0, 2, 1 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 1 ); - nlr.addActivationSource( 1, 3, 2, 1 ); - nlr.addActivationSource( 1, 0, 2, 2 ); - nlr.addActivationSource( 1, 1, 2, 2 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 2 ); - nlr.addActivationSource( 1, 0, 2, 3 ); - nlr.addActivationSource( 1, 1, 2, 3 ); - nlr.addActivationSource( 1, 2, 2, 3 ); - 
nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - nlr.addActivationSource( 3, 2, 4, 0 ); - nlr.addActivationSource( 3, 0, 4, 1 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - nlr.addActivationSource( 3, 2, 4, 1 ); - nlr.addActivationSource( 3, 0, 4, 2 ); - nlr.addActivationSource( 3, 1, 4, 2 ); - nlr.addActivationSource( 3, 2, 4, 2 ); - - nlr.addActivationSource( 4, 0, 5, 0 ); - nlr.addActivationSource( 4, 1, 5, 0 ); - nlr.addActivationSource( 4, 2, 5, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 2 ), 13 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 14 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 15 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 2 ), 16 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 17 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 18 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 
6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - tableau.setLowerBound( 16, -large ); - tableau.setUpperBound( 16, large ); - tableau.setLowerBound( 17, -large ); - tableau.setUpperBound( 17, large ); - } - - void populateNetworkBackwardReluAndBilinear( NLR::NetworkLevelReasoner &nlr, - MockTableau &tableau ) - { - /* - a a' - x e - b b' f h - y d - c c' - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 2 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 3 ); - nlr.addLayer( 2, NLR::Layer::RELU, 3 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::BILINEAR, 1 ); - nlr.addLayer( 5, NLR::Layer::WEIGHTED_SUM, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, 1 ); - nlr.setWeight( 0, 0, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 1, -3 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - - nlr.setWeight( 2, 0, 3, 0, 1 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 0, 1 ); - nlr.setWeight( 2, 1, 3, 1, 1 ); - nlr.setWeight( 2, 2, 3, 0, -1 ); - nlr.setWeight( 2, 2, 3, 1, -1 ); - - nlr.setWeight( 4, 0, 5, 0, -1 ); - - nlr.setBias( 1, 0, 1 ); - nlr.setBias( 3, 1, 2 ); - - // Mark the ReLU/Bilinear sources - nlr.addActivationSource( 1, 0, 2, 
0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 2 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 5 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 7 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 9 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 11 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 12 ); - tableau.setLowerBound( 2, -large ); - tableau.setUpperBound( 2, large ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - } - - void populateNetworkBackwardReluAndBilinear2( NLR::NetworkLevelReasoner &nlr, - MockTableau &tableau ) - { - /* - x3 x7 - x0 - x4 x8 x11 x13 - x1 x15 - x5 x9 x12 x14 - x2 - x6 x10 - - x3 = -x0 + x1 - x4 = x0 + 2*x1 - x5 = x0 + x1 + 
x2 - x6 = 3*x0 - 2*x1 - x2 - - x7 = ReLU( x3 ) - x8 = ReLU( x4 ) - x9 = ReLU( x5 ) - x10 = ReLU( x6 ) - - x11 = 2x7 + x9 - x10 + 2 - x12 = -x7 + 2x8 + x10 - - x13 = ReLU( x11 ) - x14 = ReLU( x12 ) - - x15 = x13 * x14 - */ - - // Create the layers - nlr.addLayer( 0, NLR::Layer::INPUT, 3 ); - nlr.addLayer( 1, NLR::Layer::WEIGHTED_SUM, 4 ); - nlr.addLayer( 2, NLR::Layer::RELU, 4 ); - nlr.addLayer( 3, NLR::Layer::WEIGHTED_SUM, 2 ); - nlr.addLayer( 4, NLR::Layer::RELU, 2 ); - nlr.addLayer( 5, NLR::Layer::BILINEAR, 1 ); - - // Mark layer dependencies - for ( unsigned i = 1; i <= 5; ++i ) - nlr.addLayerDependency( i - 1, i ); - - // Set the weights and biases for the weighted sum layers - nlr.setWeight( 0, 0, 1, 0, -1 ); - nlr.setWeight( 0, 0, 1, 1, 1 ); - nlr.setWeight( 0, 0, 1, 2, 1 ); - nlr.setWeight( 0, 0, 1, 3, 3 ); - nlr.setWeight( 0, 1, 1, 0, 1 ); - nlr.setWeight( 0, 1, 1, 1, 2 ); - nlr.setWeight( 0, 1, 1, 2, 1 ); - nlr.setWeight( 0, 1, 1, 3, -2 ); - nlr.setWeight( 0, 2, 1, 2, 1 ); - nlr.setWeight( 0, 2, 1, 3, -1 ); - - nlr.setWeight( 2, 0, 3, 0, 2 ); - nlr.setWeight( 2, 0, 3, 1, -1 ); - nlr.setWeight( 2, 1, 3, 1, 2 ); - nlr.setWeight( 2, 2, 3, 0, 1 ); - nlr.setWeight( 2, 3, 3, 0, -1 ); - nlr.setWeight( 2, 3, 3, 1, 1 ); - - nlr.setBias( 3, 0, 2 ); - - // Mark the ReLU/Bilinear sources - nlr.addActivationSource( 1, 0, 2, 0 ); - nlr.addActivationSource( 1, 1, 2, 1 ); - nlr.addActivationSource( 1, 2, 2, 2 ); - nlr.addActivationSource( 1, 3, 2, 3 ); - - nlr.addActivationSource( 3, 0, 4, 0 ); - nlr.addActivationSource( 3, 1, 4, 1 ); - - nlr.addActivationSource( 4, 0, 5, 0 ); - nlr.addActivationSource( 4, 1, 5, 0 ); - - // Variable indexing - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 0 ), 0 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 1 ), 1 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 0, 2 ), 2 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 0 ), 3 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 1 ), 4 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 2 ), 5 
); - nlr.setNeuronVariable( NLR::NeuronIndex( 1, 3 ), 6 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 0 ), 7 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 1 ), 8 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 2 ), 9 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 2, 3 ), 10 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 0 ), 11 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 3, 1 ), 12 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 0 ), 13 ); - nlr.setNeuronVariable( NLR::NeuronIndex( 4, 1 ), 14 ); - - nlr.setNeuronVariable( NLR::NeuronIndex( 5, 0 ), 15 ); - - // Very loose bounds for neurons except inputs - double large = 1000000; - - tableau.getBoundManager().initialize( 16 ); - tableau.setLowerBound( 3, -large ); - tableau.setUpperBound( 3, large ); - tableau.setLowerBound( 4, -large ); - tableau.setUpperBound( 4, large ); - tableau.setLowerBound( 5, -large ); - tableau.setUpperBound( 5, large ); - tableau.setLowerBound( 6, -large ); - tableau.setUpperBound( 6, large ); - tableau.setLowerBound( 7, -large ); - tableau.setUpperBound( 7, large ); - tableau.setLowerBound( 8, -large ); - tableau.setUpperBound( 8, large ); - tableau.setLowerBound( 9, -large ); - tableau.setUpperBound( 9, large ); - tableau.setLowerBound( 10, -large ); - tableau.setUpperBound( 10, large ); - tableau.setLowerBound( 11, -large ); - tableau.setUpperBound( 11, large ); - tableau.setLowerBound( 12, -large ); - tableau.setUpperBound( 12, large ); - tableau.setLowerBound( 13, -large ); - tableau.setUpperBound( 13, large ); - tableau.setLowerBound( 14, -large ); - tableau.setUpperBound( 14, large ); - tableau.setLowerBound( 15, -large ); - tableau.setUpperBound( 15, large ); - } - void test_evaluate_relu() { NLR::NetworkLevelReasoner nlr; @@ -8196,7 +6041,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Layer 2 (with residual from x0): - x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1] + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 2] 
x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] x3 range: [-1, 2.5] @@ -8212,7 +6057,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite x5.lb = 3 ( 0 ) + 3 ( x0 ) + 1 = 3x0 + 1 : [-2, 4] x5.ub = 3 ( -15/14 x0 + 20/14 ) + 3 ( x0 ) + 1 = -3/14 x0 + 74/14 : [71/14, 77/14 = 5.5] - x5 range: [-2, 4] + x5 range: [-2, 5.5] */ List expectedBounds( { @@ -8268,7 +6113,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Layer 2 (with residual from x0): - x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 1] + x3.lb = -1( 0.5x0 + 0.5 ) -x0 + 1 = -1.5x0 + 0.5 : [-1, 2] x3.ub = -1( 0.5x0 ) -1x0 + 1 = -1.5x0 + 1 : [-0.5, 2.5] x3 range: [-1, 2.5] @@ -9627,10 +7472,10 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Tightening( 10, 0.1173, Tightening::LB ), Tightening( 10, 0.1173, Tightening::UB ), Tightening( 11, 0.0179, Tightening::LB ), Tightening( 11, 0.0179, Tightening::UB ), Tightening( 12, 0.0159, Tightening::LB ), Tightening( 12, 0.0159, Tightening::UB ), - Tightening( 13, 0.9470, Tightening::LB ), Tightening( 13, 0.9470, Tightening::UB ), - Tightening( 14, -0.9470, Tightening::LB ), Tightening( 14, -0.9470, Tightening::UB ), - Tightening( 15, 1.0253, Tightening::LB ), Tightening( 15, 1.0253, Tightening::UB ), - Tightening( 16, -1.0253, Tightening::LB ), Tightening( 16, -1.0253, Tightening::UB ) + Tightening( 13, 1, Tightening::LB ), Tightening( 13, 1, Tightening::UB ), + Tightening( 14, -1, Tightening::LB ), Tightening( 14, -1, Tightening::UB ), + Tightening( 15, 1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, -1, Tightening::UB ) } ); @@ -9733,9 +7578,9 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite Layer 3: - x7.lb = -1 ( 2x0 - 5x1 + 3 ) = -2x0 + 7x1 - 3 : [-21, 0] - x7.ub = -1 ( -2x0 + 3x1 - 1 ) = 2x0 + x1 + 1 : [2, 7] - x4 range: [-21, 5] + x5.lb = -1 ( 2x0 - 5x1 + 3 ) = -2x0 + 7x1 - 3 : [-21, 0] + x5.ub = -1 ( -2x0 + 
3x1 - 1 ) = 2x0 + x1 + 1 : [2, 7] + x5 range: [-21, 7] */ List expectedBounds( { Tightening( 2, -1, Tightening::LB ), From c98a27895a63047dd03c64beffd7ae75c8c75694 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 18 Sep 2025 20:52:19 +0300 Subject: [PATCH 70/72] Further fixing for Max, Softmax, Bilinear IA/SBT Interval Arithmetic and Symbolic Bound Tightening for Max, Softmax, Bilinear mistakenly assume 1. All source neurons are from same layer. 2. The size of the current layer is equal to the number of source neurons of any softmax neuron. Fixed both errors. --- src/nlr/Layer.cpp | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/src/nlr/Layer.cpp b/src/nlr/Layer.cpp index d4ae7af8a2..d24a952225 100644 --- a/src/nlr/Layer.cpp +++ b/src/nlr/Layer.cpp @@ -1104,7 +1104,6 @@ void Layer::computeIntervalArithmeticBoundsForMax() ASSERT( _neuronToActivationSources.exists( i ) ); List sources = getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); NeuronIndex indexOfMaxLowerBound = *( sources.begin() ); double maxLowerBound = FloatUtils::negativeInfinity(); @@ -1115,6 +1114,7 @@ void Layer::computeIntervalArithmeticBoundsForMax() unsigned counter = 0; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); @@ -1196,14 +1196,12 @@ void Layer::computeIntervalArithmeticBoundsForSoftmax() ASSERT( _neuronToActivationSources.exists( i ) ); List sources = getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - - ASSERT( sourceLayer->getSize() == _size ); Vector sourceLbs; Vector sourceUbs; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned 
sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); @@ -1253,14 +1251,13 @@ void Layer::computeIntervalArithmeticBoundsForBilinear() List sources = getActivationSources( i ); ASSERT( sources.size() == 2 ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - Vector sourceLbs; Vector sourceUbs; Vector sourceValues; bool allConstant = true; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); @@ -2546,11 +2543,6 @@ void Layer::computeSymbolicBoundsForMax() ASSERT( _neuronToActivationSources.exists( i ) ); List sources = getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - - unsigned sourceLayerSize = sourceLayer->getSize(); - const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); - const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); NeuronIndex indexOfMaxLowerBound = *( sources.begin() ); double maxLowerBound = FloatUtils::negativeInfinity(); @@ -2560,6 +2552,7 @@ void Layer::computeSymbolicBoundsForMax() Map sourceUbs; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); @@ -2591,6 +2584,11 @@ void Layer::computeSymbolicBoundsForMax() } } + const Layer *sourceLayer = _layerOwner->getLayer( indexOfMaxLowerBound._layer ); + unsigned sourceLayerSize = sourceLayer->getSize(); + const double *sourceSymbolicLb = sourceLayer->getSymbolicLb(); + const double *sourceSymbolicUb = sourceLayer->getSymbolicUb(); + if ( phaseFixed ) { // Phase fixed @@ -2670,7 
+2668,6 @@ void Layer::computeSymbolicBoundsForSoftmax() std::fill_n( _work, _size * _size, 0 ); Set handledInputNeurons; - unsigned sourceLayerSize = _size; SoftmaxBoundType boundType = Options::get()->getSoftmaxBoundType(); for ( unsigned i = 0; i < _size; ++i ) @@ -2694,10 +2691,6 @@ void Layer::computeSymbolicBoundsForSoftmax() ASSERT( _neuronToActivationSources.exists( i ) ); List sources = getActivationSources( i ); - const Layer *sourceLayer = _layerOwner->getLayer( sources.begin()->_layer ); - - sourceLayerSize = sourceLayer->getSize(); - ASSERT( sourceLayerSize == _size ); Vector sourceLbs; Vector sourceUbs; @@ -2706,6 +2699,7 @@ void Layer::computeSymbolicBoundsForSoftmax() Vector targetUbs; for ( const auto &sourceIndex : sources ) { + const Layer *sourceLayer = _layerOwner->getLayer( sourceIndex._layer ); unsigned sourceNeuron = sourceIndex._neuron; double sourceLb = sourceLayer->getLb( sourceNeuron ); double sourceUb = sourceLayer->getUb( sourceNeuron ); @@ -2840,6 +2834,7 @@ void Layer::computeSymbolicBoundsForSoftmax() for ( const auto &sourceLayerEntry : _sourceLayers ) { const Layer *sourceLayer = _layerOwner->getLayer( sourceLayerEntry.first ); + unsigned sourceLayerSize = sourceLayer->getSize(); /* Perform the multiplication From 29bfda9bd7bb18333ac206d9bb930c9b1ffea8c4 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 18 Sep 2025 20:52:56 +0300 Subject: [PATCH 71/72] Clang-Format --- src/nlr/tests/Test_NetworkLevelReasoner.h | 28 +++++++++++------------ 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/src/nlr/tests/Test_NetworkLevelReasoner.h b/src/nlr/tests/Test_NetworkLevelReasoner.h index 4a0343d6b1..e422c14183 100644 --- a/src/nlr/tests/Test_NetworkLevelReasoner.h +++ b/src/nlr/tests/Test_NetworkLevelReasoner.h @@ -7462,20 +7462,20 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite */ List expectedBounds( - { Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), - Tightening( 4, 3, 
Tightening::LB ), Tightening( 4, 3, Tightening::UB ), - Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ), - Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, Tightening::UB ), - Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ), - Tightening( 8, 0.8668, Tightening::LB ), Tightening( 8, 0.8668, Tightening::UB ), - Tightening( 9, 0.9820, Tightening::LB ), Tightening( 9, 0.9820, Tightening::UB ), - Tightening( 10, 0.1173, Tightening::LB ), Tightening( 10, 0.1173, Tightening::UB ), - Tightening( 11, 0.0179, Tightening::LB ), Tightening( 11, 0.0179, Tightening::UB ), - Tightening( 12, 0.0159, Tightening::LB ), Tightening( 12, 0.0159, Tightening::UB ), - Tightening( 13, 1, Tightening::LB ), Tightening( 13, 1, Tightening::UB ), - Tightening( 14, -1, Tightening::LB ), Tightening( 14, -1, Tightening::UB ), - Tightening( 15, 1, Tightening::LB ), Tightening( 15, 1, Tightening::UB ), - Tightening( 16, -1, Tightening::LB ), Tightening( 16, -1, Tightening::UB ) + { Tightening( 3, 2, Tightening::LB ), Tightening( 3, 2, Tightening::UB ), + Tightening( 4, 3, Tightening::LB ), Tightening( 4, 3, Tightening::UB ), + Tightening( 5, 0, Tightening::LB ), Tightening( 5, 0, Tightening::UB ), + Tightening( 6, -1, Tightening::LB ), Tightening( 6, -1, Tightening::UB ), + Tightening( 7, -2, Tightening::LB ), Tightening( 7, -2, Tightening::UB ), + Tightening( 8, 0.8668, Tightening::LB ), Tightening( 8, 0.8668, Tightening::UB ), + Tightening( 9, 0.9820, Tightening::LB ), Tightening( 9, 0.9820, Tightening::UB ), + Tightening( 10, 0.1173, Tightening::LB ), Tightening( 10, 0.1173, Tightening::UB ), + Tightening( 11, 0.0179, Tightening::LB ), Tightening( 11, 0.0179, Tightening::UB ), + Tightening( 12, 0.0159, Tightening::LB ), Tightening( 12, 0.0159, Tightening::UB ), + Tightening( 13, 1, Tightening::LB ), Tightening( 13, 1, Tightening::UB ), + Tightening( 14, -1, Tightening::LB ), Tightening( 14, -1, Tightening::UB ), + Tightening( 15, 1, 
Tightening::LB ), Tightening( 15, 1, Tightening::UB ), + Tightening( 16, -1, Tightening::LB ), Tightening( 16, -1, Tightening::UB ) } ); From 8fcd8b0f08c0d24ad98998e48c9adc9a8aabcf80 Mon Sep 17 00:00:00 2001 From: ido-shm-uel Date: Thu, 16 Oct 2025 20:06:50 +0300 Subject: [PATCH 72/72] Manually Sync Fork: Towards Proof Minimization (#889). --- CHANGELOG.md | 1 + maraboupy/MarabouCore.cpp | 8 +- src/common/Pair.h | 5 + src/common/Statistics.cpp | 25 +-- src/common/Statistics.h | 9 +- src/configuration/GlobalConfiguration.cpp | 11 +- src/configuration/GlobalConfiguration.h | 12 +- src/engine/AbsoluteValueConstraint.cpp | 54 ++++-- src/engine/BoundManager.cpp | 26 ++- src/engine/BoundManager.h | 4 +- ...{CDSmtCore.cpp => CDSearchTreeHandler.cpp} | 91 +++++----- .../{CDSmtCore.h => CDSearchTreeHandler.h} | 39 +++-- src/engine/CMakeLists.txt | 6 +- src/engine/DnCWorker.cpp | 23 +-- src/engine/Engine.cpp | 165 ++++++++++-------- src/engine/Engine.h | 98 ++++++----- src/engine/EngineState.h | 2 +- src/engine/IBoundManager.h | 17 +- src/engine/IEngine.h | 20 ++- src/engine/LeakyReluConstraint.cpp | 76 +++++--- src/engine/MaxConstraint.cpp | 22 +-- src/engine/MaxConstraint.h | 2 +- src/engine/PiecewiseLinearConstraint.cpp | 1 + src/engine/PiecewiseLinearConstraint.h | 6 +- src/engine/PrecisionRestorer.cpp | 25 +-- src/engine/PrecisionRestorer.h | 4 +- src/engine/ReluConstraint.cpp | 74 +++++--- .../{SmtCore.cpp => SearchTreeHandler.cpp} | 93 +++++----- src/engine/{SmtCore.h => SearchTreeHandler.h} | 51 +++--- ...SmtStackEntry.h => SearchTreeStackEntry.h} | 16 +- src/engine/{SmtState.h => SearchTreeState.h} | 16 +- src/engine/SignConstraint.cpp | 35 ++-- src/engine/SubQuery.h | 4 +- src/engine/Tableau.cpp | 23 ++- src/engine/tests/MockBoundManager.h | 4 +- src/engine/tests/MockEngine.h | 40 ++--- ...est_SmtCore.h => Test_SearchTreeHandler.h} | 152 ++++++++-------- src/proofs/Checker.cpp | 6 + src/proofs/PlcLemma.cpp | 22 ++- src/proofs/PlcLemma.h | 10 +- 
src/proofs/UnsatCertificateNode.cpp | 8 + src/proofs/UnsatCertificateNode.h | 5 + src/proofs/UnsatCertificateUtils.cpp | 88 ++++++---- src/proofs/UnsatCertificateUtils.h | 17 ++ src/proofs/tests/Test_UnsatCertificateNode.h | 6 +- 45 files changed, 835 insertions(+), 587 deletions(-) rename src/engine/{CDSmtCore.cpp => CDSearchTreeHandler.cpp} (79%) rename src/engine/{CDSmtCore.h => CDSearchTreeHandler.h} (86%) rename src/engine/{SmtCore.cpp => SearchTreeHandler.cpp} (86%) rename src/engine/{SmtCore.h => SearchTreeHandler.h} (79%) rename src/engine/{SmtStackEntry.h => SearchTreeStackEntry.h} (80%) rename src/engine/{SmtState.h => SearchTreeState.h} (82%) rename src/engine/tests/{Test_SmtCore.h => Test_SearchTreeHandler.h} (75%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7f6fda64c2..fefd18b381 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ - Implemented backward analysis using preimage-approximation algorithm for `Relu`, `LeakyRelu`, `Sign` and `Bilinear` Layers. - Added the BaBSR heuristic as a new branching strategy for ReLU Splitting - Support Sub of two variables, "Mul" of two constants, Slice, and ConstantOfShape in the python onnx parser + - Renamed SmtCore module to SearchTreeHandler ## Version 2.0.0 diff --git a/maraboupy/MarabouCore.cpp b/maraboupy/MarabouCore.cpp index 8d35646b13..3bbfc9c51b 100644 --- a/maraboupy/MarabouCore.cpp +++ b/maraboupy/MarabouCore.cpp @@ -833,8 +833,8 @@ PYBIND11_MODULE( MarabouCore, m ) .value( "NUM_POPS", Statistics::StatisticsUnsignedAttribute::NUM_POPS ) .value( "CURRENT_DECISION_LEVEL", Statistics::StatisticsUnsignedAttribute::CURRENT_DECISION_LEVEL ) - .value( "NUM_PL_SMT_ORIGINATED_SPLITS", - Statistics::StatisticsUnsignedAttribute::NUM_PL_SMT_ORIGINATED_SPLITS ) + .value( "NUM_PL_SEARCH_TREE_ORIGINATED_SPLITS", + Statistics::StatisticsUnsignedAttribute::NUM_PL_SEARCH_TREE_ORIGINATED_SPLITS ) .value( "NUM_VISITED_TREE_STATES", Statistics::StatisticsUnsignedAttribute::NUM_VISITED_TREE_STATES ) .value( 
"PP_NUM_TIGHTENING_ITERATIONS", @@ -862,8 +862,8 @@ PYBIND11_MODULE( MarabouCore, m ) py::enum_( m, "StatisticsLongAttribute" ) .value( "NUM_TIGHTENINGS_FROM_EXPLICIT_BASIS", Statistics::StatisticsLongAttribute::NUM_TIGHTENINGS_FROM_EXPLICIT_BASIS ) - .value( "TOTAL_TIME_SMT_CORE_MICRO", - Statistics::StatisticsLongAttribute::TOTAL_TIME_SMT_CORE_MICRO ) + .value( "TOTAL_TIME_SEARCH_TREE_HANDLER_MICRO", + Statistics::StatisticsLongAttribute::TOTAL_TIME_SEARCH_TREE_HANDLER_MICRO ) .value( "TOTAL_TIME_PERFORMING_VALID_CASE_SPLITS_MICRO", Statistics::StatisticsLongAttribute::TOTAL_TIME_PERFORMING_VALID_CASE_SPLITS_MICRO ) .value( "PREPROCESSING_TIME_MICRO", diff --git a/src/common/Pair.h b/src/common/Pair.h index 7cf7d46b5e..f4ca7a9d43 100644 --- a/src/common/Pair.h +++ b/src/common/Pair.h @@ -33,6 +33,11 @@ template class Pair { } + Pair( const Pair &other ) + : _container( other._container ) + { + } + L &first() { return _container.first; diff --git a/src/common/Statistics.cpp b/src/common/Statistics.cpp index b9f7e72e52..9202a9a759 100644 --- a/src/common/Statistics.cpp +++ b/src/common/Statistics.cpp @@ -24,7 +24,7 @@ Statistics::Statistics() _unsignedAttributes[NUM_PL_CONSTRAINTS] = 0; _unsignedAttributes[NUM_ACTIVE_PL_CONSTRAINTS] = 0; _unsignedAttributes[NUM_PL_VALID_SPLITS] = 0; - _unsignedAttributes[NUM_PL_SMT_ORIGINATED_SPLITS] = 0; + _unsignedAttributes[NUM_PL_SEARCH_TREE_ORIGINATED_SPLITS] = 0; _unsignedAttributes[NUM_PRECISION_RESTORATIONS] = 0; _unsignedAttributes[CURRENT_DECISION_LEVEL] = 0; _unsignedAttributes[MAX_DECISION_LEVEL] = 0; @@ -43,6 +43,7 @@ Statistics::Statistics() _unsignedAttributes[NUM_CERTIFIED_LEAVES] = 0; _unsignedAttributes[NUM_DELEGATED_LEAVES] = 0; _unsignedAttributes[NUM_LEMMAS] = 0; + _unsignedAttributes[NUM_LEMMAS_USED] = 0; _unsignedAttributes[CERTIFIED_UNSAT] = 0; _longAttributes[NUM_MAIN_LOOP_ITERATIONS] = 0; @@ -82,7 +83,7 @@ Statistics::Statistics() _longAttributes[TOTAL_TIME_PRECISION_RESTORATION] = 0; 
_longAttributes[TOTAL_TIME_CONSTRAINT_MATRIX_BOUND_TIGHTENING_MICRO] = 0; _longAttributes[TOTAL_TIME_APPLYING_STORED_TIGHTENINGS_MICRO] = 0; - _longAttributes[TOTAL_TIME_SMT_CORE_MICRO] = 0; + _longAttributes[TOTAL_TIME_SEARCH_TREE_HANDLER_MICRO] = 0; _longAttributes[TOTAL_TIME_UPDATING_SOI_PHASE_PATTERN_MICRO] = 0; _longAttributes[NUM_PROPOSED_PHASE_PATTERN_UPDATE] = 0; _longAttributes[NUM_ACCEPTED_PHASE_PATTERN_UPDATE] = 0; @@ -208,11 +209,11 @@ void Statistics::print() printf( "\t\t[%.2lf%%] Applying stored bound-tightening: %llu milli\n", printPercents( totalTimeApplyingStoredTighteningsMicro, timeMainLoopMicro ), totalTimeApplyingStoredTighteningsMicro / 1000 ); - unsigned long long totalTimeSmtCoreMicro = - getLongAttribute( Statistics::TOTAL_TIME_SMT_CORE_MICRO ); - printf( "\t\t[%.2lf%%] SMT core: %llu milli\n", - printPercents( totalTimeSmtCoreMicro, timeMainLoopMicro ), - totalTimeSmtCoreMicro / 1000 ); + unsigned long long totalTimeSearchTreeMicro = + getLongAttribute( Statistics::TOTAL_TIME_SEARCH_TREE_HANDLER_MICRO ); + printf( "\t\t[%.2lf%%] Search Tree Handler : %llu milli\n", + printPercents( totalTimeSearchTreeMicro, timeMainLoopMicro ), + totalTimeSearchTreeMicro / 1000 ); unsigned long long totalTimePerformingSymbolicBoundTightening = getLongAttribute( Statistics::TOTAL_TIME_PERFORMING_SYMBOLIC_BOUND_TIGHTENING ); printf( "\t\t[%.2lf%%] Symbolic Bound Tightening: %llu milli\n", @@ -234,7 +235,7 @@ void Statistics::print() totalTimePerformingValidCaseSplitsMicro + totalTimeHandlingStatisticsMicro + totalTimeExplicitBasisBoundTighteningMicro + totalTimeDegradationChecking + totalTimePrecisionRestoration + totalTimeConstraintMatrixBoundTighteningMicro + - totalTimeApplyingStoredTighteningsMicro + totalTimeSmtCoreMicro + + totalTimeApplyingStoredTighteningsMicro + totalTimeSearchTreeMicro + totalTimePerformingSymbolicBoundTightening; printf( "\t\t[%.2lf%%] Unaccounted for: %llu milli\n", @@ -270,11 +271,11 @@ void Statistics::print() printAverage( 
timeConstraintFixingStepsMicro / 1000, numConstraintFixingSteps ) ); printf( "\tNumber of active piecewise-linear constraints: %u / %u\n" "\t\tConstraints disabled by valid splits: %u. " - "By SMT-originated splits: %u\n", + "By Search-Tree--originated splits: %u\n", getUnsignedAttribute( Statistics::NUM_ACTIVE_PL_CONSTRAINTS ), getUnsignedAttribute( Statistics::NUM_PL_CONSTRAINTS ), getUnsignedAttribute( Statistics::NUM_PL_VALID_SPLITS ), - getUnsignedAttribute( Statistics::NUM_PL_SMT_ORIGINATED_SPLITS ) ); + getUnsignedAttribute( Statistics::NUM_PL_SEARCH_TREE_ORIGINATED_SPLITS ) ); printf( "\tLast reported degradation: %.10lf. Max degradation so far: %.10lf. " "Restorations so far: %u\n", getDoubleAttribute( Statistics::CURRENT_DEGRADATION ), @@ -314,7 +315,7 @@ void Statistics::print() getUnsignedAttribute( Statistics::CURRENT_TABLEAU_M ), getUnsignedAttribute( Statistics::CURRENT_TABLEAU_N ) ); - printf( "\t--- SMT Core Statistics ---\n" ); + printf( "\t--- Search Tree Handler Statistics ---\n" ); printf( "\tTotal depth is %u. Total visited states: %u. Number of splits: %u. 
Number of pops: %u\n", getUnsignedAttribute( Statistics::CURRENT_DECISION_LEVEL ), @@ -428,6 +429,8 @@ void Statistics::print() printf( "\tNumber of leaves to delegate: %u\n", getUnsignedAttribute( Statistics::NUM_DELEGATED_LEAVES ) ); printf( "\tNumber of lemmas: %u\n", getUnsignedAttribute( Statistics::NUM_LEMMAS ) ); + printf( "\tNumber of lemmas used in proof minimization: %u\n", + getUnsignedAttribute( Statistics::NUM_LEMMAS_USED ) ); } unsigned long long Statistics::getTotalTimeInMicro() const diff --git a/src/common/Statistics.h b/src/common/Statistics.h index 4761594b4e..0454d9d22c 100644 --- a/src/common/Statistics.h +++ b/src/common/Statistics.h @@ -31,12 +31,12 @@ class Statistics NUM_PL_CONSTRAINTS, NUM_ACTIVE_PL_CONSTRAINTS, NUM_PL_VALID_SPLITS, - NUM_PL_SMT_ORIGINATED_SPLITS, + NUM_PL_SEARCH_TREE_ORIGINATED_SPLITS, // Precision restoration NUM_PRECISION_RESTORATIONS, - // Current and max stack depth in the SMT core + // Current and max stack depth in the Search Tree Handler CURRENT_DECISION_LEVEL, MAX_DECISION_LEVEL, @@ -71,6 +71,7 @@ class Statistics NUM_CERTIFIED_LEAVES, NUM_DELEGATED_LEAVES, NUM_LEMMAS, + NUM_LEMMAS_USED, // 1 if returned UNSAT and proof was certified by proof checker, 0 otherwise. 
CERTIFIED_UNSAT, @@ -188,8 +189,8 @@ class Statistics // Total amount of time spent applying previously stored bound tightenings TOTAL_TIME_APPLYING_STORED_TIGHTENINGS_MICRO, - // Total amount of time spent within the SMT core - TOTAL_TIME_SMT_CORE_MICRO, + // Total amount of time spent within the search tree core + TOTAL_TIME_SEARCH_TREE_HANDLER_MICRO, // Total time heuristically updating the SoI phase pattern TOTAL_TIME_UPDATING_SOI_PHASE_PATTERN_MICRO, diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp index 323983234f..3923dc09fc 100644 --- a/src/configuration/GlobalConfiguration.cpp +++ b/src/configuration/GlobalConfiguration.cpp @@ -62,7 +62,7 @@ const unsigned GlobalConfiguration::MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS = 5; const DivideStrategy GlobalConfiguration::SPLITTING_HEURISTICS = DivideStrategy::ReLUViolation; const unsigned GlobalConfiguration::INTERVAL_SPLITTING_FREQUENCY = 10; const unsigned GlobalConfiguration::INTERVAL_SPLITTING_THRESHOLD = 10; -const unsigned GlobalConfiguration::BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY = 100; +const unsigned GlobalConfiguration::BOUND_TIGHTENING_ON_CONSTRAINT_MATRIX_FREQUENCY = 100; const unsigned GlobalConfiguration::ROW_BOUND_TIGHTENER_SATURATION_ITERATIONS = 20; const double GlobalConfiguration::COST_FUNCTION_ERROR_THRESHOLD = 0.0000000001; @@ -126,6 +126,9 @@ const bool GlobalConfiguration::WRITE_JSON_PROOF = false; const unsigned GlobalConfiguration::BACKWARD_BOUND_PROPAGATION_DEPTH = 3; const unsigned GlobalConfiguration::MAX_ROUNDS_OF_BACKWARD_ANALYSIS = 10; +const bool GlobalConfiguration::ANALYZE_PROOF_DEPENDENCIES = false; +const bool GlobalConfiguration::MINIMIZE_PROOF_DEPENDENCIES = false; + #ifdef ENABLE_GUROBI const unsigned GlobalConfiguration::GUROBI_NUMBER_OF_THREADS = 1; const bool GlobalConfiguration::GUROBI_LOGGING = false; @@ -135,7 +138,7 @@ const bool GlobalConfiguration::GUROBI_LOGGING = false; const bool 
GlobalConfiguration::DNC_MANAGER_LOGGING = false; const bool GlobalConfiguration::ENGINE_LOGGING = false; const bool GlobalConfiguration::TABLEAU_LOGGING = false; -const bool GlobalConfiguration::SMT_CORE_LOGGING = false; +const bool GlobalConfiguration::SEARCH_TREE_HANDLER_LOGGING = false; const bool GlobalConfiguration::DANTZIGS_RULE_LOGGING = false; const bool GlobalConfiguration::BASIS_FACTORIZATION_LOGGING = false; const bool GlobalConfiguration::PREPROCESSOR_LOGGING = false; @@ -181,8 +184,8 @@ void GlobalConfiguration::print() printf( " GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD: %.15lf\n", GAUSSIAN_ELIMINATION_PIVOT_SCALE_THRESHOLD ); printf( " MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS: %u\n", MAX_SIMPLEX_PIVOT_SEARCH_ITERATIONS ); - printf( " BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY: %u\n", - BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY ); + printf( " BOUND_TIGHTENING_ON_CONSTRAINT_MATRIX_FREQUENCY: %u\n", + BOUND_TIGHTENING_ON_CONSTRAINT_MATRIX_FREQUENCY ); printf( " COST_FUNCTION_ERROR_THRESHOLD: %.15lf\n", COST_FUNCTION_ERROR_THRESHOLD ); printf( " USE_HARRIS_RATIO_TEST: %s\n", USE_HARRIS_RATIO_TEST ? "Yes" : "No" ); diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h index ad8baf68c1..59d07b893f 100644 --- a/src/configuration/GlobalConfiguration.h +++ b/src/configuration/GlobalConfiguration.h @@ -139,7 +139,7 @@ class GlobalConfiguration static const unsigned INTERVAL_SPLITTING_THRESHOLD; // How often should we perform full bound tightening, on the entire contraints matrix A. - static const unsigned BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY; + static const unsigned BOUND_TIGHTENING_ON_CONSTRAINT_MATRIX_FREQUENCY; // When the row bound tightener is asked to run until saturation, it can enter an infinite loop // due to tiny increments in bounds. This number limits the number of iterations it can perform. 
@@ -284,6 +284,14 @@ class GlobalConfiguration */ static const unsigned MAX_ROUNDS_OF_BACKWARD_ANALYSIS; + /* Analyze lemma dependencies when producing proofs + */ + static const bool ANALYZE_PROOF_DEPENDENCIES; + + /* Minimize the number of lemma dependencies when producing proofs + */ + static const bool MINIMIZE_PROOF_DEPENDENCIES; + #ifdef ENABLE_GUROBI /* The number of threads Gurobi spawns @@ -298,7 +306,7 @@ class GlobalConfiguration static const bool DNC_MANAGER_LOGGING; static const bool ENGINE_LOGGING; static const bool TABLEAU_LOGGING; - static const bool SMT_CORE_LOGGING; + static const bool SEARCH_TREE_HANDLER_LOGGING; static const bool DANTZIGS_RULE_LOGGING; static const bool BASIS_FACTORIZATION_LOGGING; static const bool PREPROCESSOR_LOGGING; diff --git a/src/engine/AbsoluteValueConstraint.cpp b/src/engine/AbsoluteValueConstraint.cpp index a9763980ad..5954716b65 100644 --- a/src/engine/AbsoluteValueConstraint.cpp +++ b/src/engine/AbsoluteValueConstraint.cpp @@ -152,7 +152,9 @@ void AbsoluteValueConstraint::notifyLowerBound( unsigned variable, double bound Tightening::UB, { variable, variable }, Tightening::UB, - getType() ); + *this, + false, + fUpperBound ); else if ( proofs && phaseFixed() ) { std::shared_ptr tighteningRow = @@ -232,7 +234,9 @@ void AbsoluteValueConstraint::notifyUpperBound( unsigned variable, double bound Tightening::UB, { variable, variable }, Tightening::UB, - getType() ); + *this, + false, + fUpperBound ); else if ( proofs && phaseFixed() ) { std::shared_ptr tighteningRow = @@ -406,7 +410,7 @@ AbsoluteValueConstraint::getSmartFixes( ITableau * /* tableau */ ) const List AbsoluteValueConstraint::getCaseSplits() const { - ASSERT( _phaseStatus == PhaseStatus::PHASE_NOT_FIXED ); + ASSERT( getPhaseStatus() == PhaseStatus::PHASE_NOT_FIXED ); List splits; splits.append( getNegativeSplit() ); @@ -476,14 +480,14 @@ PiecewiseLinearCaseSplit AbsoluteValueConstraint::getPositiveSplit() const bool AbsoluteValueConstraint::phaseFixed() const 
{ - return _phaseStatus != PhaseStatus::PHASE_NOT_FIXED; + return getPhaseStatus() != PhaseStatus::PHASE_NOT_FIXED; } PiecewiseLinearCaseSplit AbsoluteValueConstraint::getImpliedCaseSplit() const { - ASSERT( _phaseStatus != PHASE_NOT_FIXED ); + ASSERT( getPhaseStatus() != PHASE_NOT_FIXED ); - if ( _phaseStatus == ABS_PHASE_POSITIVE ) + if ( getPhaseStatus() == ABS_PHASE_POSITIVE ) return getPositiveSplit(); return getNegativeSplit(); @@ -512,8 +516,8 @@ void AbsoluteValueConstraint::dump( String &output ) const _f, _b, _constraintActive ? "Yes" : "No", - _phaseStatus, - phaseToString( _phaseStatus ).ascii() ); + getPhaseStatus(), + phaseToString( getPhaseStatus() ).ascii() ); output += Stringf( "b in [%s, %s], ", @@ -826,7 +830,7 @@ void AbsoluteValueConstraint::fixPhaseIfNeeded() setPhaseStatus( ABS_PHASE_POSITIVE ); if ( proofs ) _boundManager->addLemmaExplanationAndTightenBound( - _posAux, 0, Tightening::UB, { _b }, Tightening::LB, getType() ); + _posAux, 0, Tightening::UB, { _b }, Tightening::LB, *this, true, 0 ); return; } @@ -836,7 +840,7 @@ void AbsoluteValueConstraint::fixPhaseIfNeeded() setPhaseStatus( ABS_PHASE_NEGATIVE ); if ( proofs ) _boundManager->addLemmaExplanationAndTightenBound( - _negAux, 0, Tightening::UB, { _b }, Tightening::UB, getType() ); + _negAux, 0, Tightening::UB, { _b }, Tightening::UB, *this, true, 0 ); return; } @@ -849,8 +853,14 @@ void AbsoluteValueConstraint::fixPhaseIfNeeded() { setPhaseStatus( ABS_PHASE_NEGATIVE ); if ( proofs ) - _boundManager->addLemmaExplanationAndTightenBound( - _negAux, 0, Tightening::UB, { _b, _f }, Tightening::UB, getType() ); + _boundManager->addLemmaExplanationAndTightenBound( _negAux, + 0, + Tightening::UB, + { _b, _f }, + Tightening::UB, + *this, + true, + getUpperBound( _b ) ); return; } @@ -860,8 +870,14 @@ void AbsoluteValueConstraint::fixPhaseIfNeeded() { setPhaseStatus( ABS_PHASE_POSITIVE ); if ( proofs ) - _boundManager->addLemmaExplanationAndTightenBound( - _posAux, 0, Tightening::UB, { _b, _f 
}, Tightening::LB, getType() ); + _boundManager->addLemmaExplanationAndTightenBound( _posAux, + 0, + Tightening::UB, + { _b, _f }, + Tightening::LB, + *this, + true, + -getLowerBound( _b ) ); return; } @@ -871,6 +887,9 @@ void AbsoluteValueConstraint::fixPhaseIfNeeded() if ( existsUpperBound( _posAux ) && FloatUtils::isZero( getUpperBound( _posAux ) ) ) { setPhaseStatus( ABS_PHASE_POSITIVE ); + if ( proofs ) + _boundManager->addLemmaExplanationAndTightenBound( + _posAux, 0, Tightening::UB, { _posAux }, Tightening::UB, *this, true, 0 ); return; } @@ -880,7 +899,7 @@ void AbsoluteValueConstraint::fixPhaseIfNeeded() setPhaseStatus( ABS_PHASE_NEGATIVE ); if ( proofs ) _boundManager->addLemmaExplanationAndTightenBound( - _negAux, 0, Tightening::UB, { _posAux }, Tightening::LB, getType() ); + _negAux, 0, Tightening::UB, { _posAux }, Tightening::LB, *this, true, 0 ); return; } @@ -888,6 +907,9 @@ void AbsoluteValueConstraint::fixPhaseIfNeeded() if ( existsUpperBound( _negAux ) && FloatUtils::isZero( getUpperBound( _negAux ) ) ) { setPhaseStatus( ABS_PHASE_NEGATIVE ); + if ( proofs ) + _boundManager->addLemmaExplanationAndTightenBound( + _negAux, 0, Tightening::UB, { _negAux }, Tightening::UB, *this, true, 0 ); return; } @@ -897,7 +919,7 @@ void AbsoluteValueConstraint::fixPhaseIfNeeded() setPhaseStatus( ABS_PHASE_POSITIVE ); if ( proofs ) _boundManager->addLemmaExplanationAndTightenBound( - _posAux, 0, Tightening::UB, { _negAux }, Tightening::LB, getType() ); + _posAux, 0, Tightening::UB, { _negAux }, Tightening::LB, *this, true, 0 ); return; } } diff --git a/src/engine/BoundManager.cpp b/src/engine/BoundManager.cpp index 5c8c9d1562..3fd03cd992 100644 --- a/src/engine/BoundManager.cpp +++ b/src/engine/BoundManager.cpp @@ -17,7 +17,6 @@ #include "Debug.h" #include "FloatUtils.h" -#include "InfeasibleQueryException.h" #include "MarabouError.h" #include "Tableau.h" #include "Tightening.h" @@ -417,7 +416,9 @@ bool BoundManager::addLemmaExplanationAndTightenBound( unsigned 
var, Tightening::BoundType affectedVarBound, const List &causingVars, Tightening::BoundType causingVarBound, - PiecewiseLinearFunctionType constraintType ) + PiecewiseLinearConstraint &constraint, + bool /*isPhaseFixing*/, + double minTargetBound ) { if ( !shouldProduceProofs() ) return false; @@ -432,12 +433,13 @@ bool BoundManager::addLemmaExplanationAndTightenBound( unsigned var, if ( tightened ) { - if ( constraintType == RELU || constraintType == SIGN || constraintType == LEAKY_RELU ) + if ( constraint.getType() == RELU || constraint.getType() == SIGN || + constraint.getType() == LEAKY_RELU ) { ASSERT( causingVars.size() == 1 ); allExplanations.append( getExplanation( causingVars.front(), causingVarBound ) ); } - else if ( constraintType == ABSOLUTE_VALUE ) + else if ( constraint.getType() == ABSOLUTE_VALUE ) { if ( causingVars.size() == 1 ) allExplanations.append( getExplanation( causingVars.front(), causingVarBound ) ); @@ -456,7 +458,7 @@ bool BoundManager::addLemmaExplanationAndTightenBound( unsigned var, allExplanations.append( getExplanation( causingVars.back(), Tightening::LB ) ); } } - else if ( constraintType == MAX ) + else if ( constraint.getType() == MAX ) for ( const auto &element : causingVars ) allExplanations.append( getExplanation( element, Tightening::UB ) ); else @@ -468,12 +470,22 @@ bool BoundManager::addLemmaExplanationAndTightenBound( unsigned var, causingVarBound, affectedVarBound, allExplanations, - constraintType ); - _engine->addPLCLemma( PLCExpl ); + constraint.getType(), + minTargetBound ); + + + _engine->getUNSATCertificateCurrentPointer()->addPLCLemma( PLCExpl ); affectedVarBound == Tightening::UB ? 
_engine->updateGroundUpperBound( var, value ) : _engine->updateGroundLowerBound( var, value ); + + PLCExpl->setToCheck(); + + // Add ground bound entry to the GroundBoundManager resetExplanation( var, affectedVarBound ); + + _engine->incNumOfLemmas(); } + return true; } diff --git a/src/engine/BoundManager.h b/src/engine/BoundManager.h index 2808e1bbfc..cf04ba05dd 100644 --- a/src/engine/BoundManager.h +++ b/src/engine/BoundManager.h @@ -260,7 +260,9 @@ class BoundManager : public IBoundManager Tightening::BoundType affectedVarBound, const List &causingVars, Tightening::BoundType causingVarBound, - PiecewiseLinearFunctionType constraintType ); + PiecewiseLinearConstraint &constraint, + bool isPhaseFixing, + double minTargetBound ); /* Explainer of all bounds diff --git a/src/engine/CDSmtCore.cpp b/src/engine/CDSearchTreeHandler.cpp similarity index 79% rename from src/engine/CDSmtCore.cpp rename to src/engine/CDSearchTreeHandler.cpp index 6fb9967fed..4287001ccf 100644 --- a/src/engine/CDSmtCore.cpp +++ b/src/engine/CDSearchTreeHandler.cpp @@ -1,5 +1,5 @@ /********************* */ -/*! \file CDSmtCore.cpp +/*! \file CDSearchTreeHandler.cpp ** \verbatim ** Top contributors (to current version): ** Guy Katz, Aleksandar Zeljic, Haoze Wu, Parth Shah @@ -9,10 +9,10 @@ ** All rights reserved. See the file COPYING in the top-level source ** directory for licensing information.\endverbatim ** - ** See the description of the class in CDSmtCore.h. + ** See the description of the class in CDSearchTreeHandler.h. 
**/ -#include "CDSmtCore.h" +#include "CDSearchTreeHandler.h" #include "Debug.h" #include "DivideStrategy.h" @@ -27,7 +27,7 @@ using namespace CVC4::context; -CDSmtCore::CDSmtCore( IEngine *engine, Context &ctx ) +CDSearchTreeHandler::CDSearchTreeHandler( IEngine *engine, Context &ctx ) : _statistics( NULL ) , _context( ctx ) , _trail( &_context ) @@ -43,11 +43,11 @@ CDSmtCore::CDSmtCore( IEngine *engine, Context &ctx ) { } -CDSmtCore::~CDSmtCore() +CDSearchTreeHandler::~CDSearchTreeHandler() { } -void CDSmtCore::reportViolatedConstraint( PiecewiseLinearConstraint *constraint ) +void CDSearchTreeHandler::reportViolatedConstraint( PiecewiseLinearConstraint *constraint ) { ASSERT( !constraint->phaseFixed() ); @@ -68,7 +68,7 @@ void CDSmtCore::reportViolatedConstraint( PiecewiseLinearConstraint *constraint } } -unsigned CDSmtCore::getViolationCounts( PiecewiseLinearConstraint *constraint ) const +unsigned CDSearchTreeHandler::getViolationCounts( PiecewiseLinearConstraint *constraint ) const { if ( !_constraintToViolationCount.exists( constraint ) ) return 0; @@ -76,7 +76,7 @@ unsigned CDSmtCore::getViolationCounts( PiecewiseLinearConstraint *constraint ) return _constraintToViolationCount[constraint]; } -void CDSmtCore::initializeScoreTrackerIfNeeded( +void CDSearchTreeHandler::initializeScoreTrackerIfNeeded( const List &plConstraints ) { if ( GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH ) @@ -84,11 +84,11 @@ void CDSmtCore::initializeScoreTrackerIfNeeded( _scoreTracker = std::unique_ptr( new PseudoImpactTracker() ); _scoreTracker->initialize( plConstraints ); - SMT_LOG( "\tTracking Pseudo Impact..." ); + CD_SEARCH_TREE_LOG( "\tTracking Pseudo Impact..." 
); } } -void CDSmtCore::reportRejectedPhasePatternProposal() +void CDSearchTreeHandler::reportRejectedPhasePatternProposal() { ++_numRejectedPhasePatternProposal; @@ -102,29 +102,30 @@ void CDSmtCore::reportRejectedPhasePatternProposal() } } -bool CDSmtCore::needToSplit() const +bool CDSearchTreeHandler::needToSplit() const { return _needToSplit; } -void CDSmtCore::pushDecision( PiecewiseLinearConstraint *constraint, PhaseStatus decision ) +void CDSearchTreeHandler::pushDecision( PiecewiseLinearConstraint *constraint, + PhaseStatus decision ) { - SMT_LOG( Stringf( "Decision @ %d )", _context.getLevel() + 1 ).ascii() ); + CD_SEARCH_TREE_LOG( Stringf( "Decision @ %d )", _context.getLevel() + 1 ).ascii() ); TrailEntry te( constraint, decision ); applyTrailEntry( te, true ); - SMT_LOG( Stringf( "Decision push @ %d DONE", _context.getLevel() ).ascii() ); + CD_SEARCH_TREE_LOG( Stringf( "Decision push @ %d DONE", _context.getLevel() ).ascii() ); } -void CDSmtCore::pushImplication( PiecewiseLinearConstraint *constraint ) +void CDSearchTreeHandler::pushImplication( PiecewiseLinearConstraint *constraint ) { ASSERT( constraint->isImplication() ); - SMT_LOG( Stringf( "Implication @ %d ... ", _context.getLevel() ).ascii() ); + CD_SEARCH_TREE_LOG( Stringf( "Implication @ %d ... 
", _context.getLevel() ).ascii() ); TrailEntry te( constraint, constraint->nextFeasibleCase() ); applyTrailEntry( te, false ); - SMT_LOG( Stringf( "Implication @ %d DONE", _context.getLevel() ).ascii() ); + CD_SEARCH_TREE_LOG( Stringf( "Implication @ %d DONE", _context.getLevel() ).ascii() ); } -void CDSmtCore::applyTrailEntry( TrailEntry &te, bool isDecision ) +void CDSearchTreeHandler::applyTrailEntry( TrailEntry &te, bool isDecision ) { if ( isDecision ) { @@ -136,10 +137,10 @@ void CDSmtCore::applyTrailEntry( TrailEntry &te, bool isDecision ) _engine->applySplit( te.getPiecewiseLinearCaseSplit() ); } -void CDSmtCore::decide() +void CDSearchTreeHandler::decide() { ASSERT( _needToSplit ); - SMT_LOG( "Performing a ReLU split" ); + CD_SEARCH_TREE_LOG( "Performing a ReLU split" ); _numRejectedPhasePatternProposal = 0; // Maybe the constraint has already become inactive - if so, ignore @@ -161,7 +162,7 @@ void CDSmtCore::decide() decideSplit( _constraintForSplitting ); } -void CDSmtCore::decideSplit( PiecewiseLinearConstraint *constraint ) +void CDSearchTreeHandler::decideSplit( PiecewiseLinearConstraint *constraint ) { struct timespec start = TimeUtils::sampleMicro(); @@ -187,51 +188,51 @@ void CDSmtCore::decideSplit( PiecewiseLinearConstraint *constraint ) _statistics->setUnsignedAttribute( Statistics::MAX_DECISION_LEVEL, level ); struct timespec end = TimeUtils::sampleMicro(); - _statistics->incLongAttribute( Statistics::TOTAL_TIME_SMT_CORE_MICRO, + _statistics->incLongAttribute( Statistics::TOTAL_TIME_SEARCH_TREE_HANDLER_MICRO, TimeUtils::timePassed( start, end ) ); } - SMT_LOG( "Performing a ReLU split - DONE" ); + CD_SEARCH_TREE_LOG( "Performing a ReLU split - DONE" ); } -unsigned CDSmtCore::getDecisionLevel() const +unsigned CDSearchTreeHandler::getDecisionLevel() const { return _decisions.size(); } -bool CDSmtCore::popDecisionLevel( TrailEntry &lastDecision ) +bool CDSearchTreeHandler::popDecisionLevel( TrailEntry &lastDecision ) { // ASSERT( static_cast( 
_context.getLevel() ) == _decisions.size() ); if ( _decisions.empty() ) return false; - SMT_LOG( "Popping trail ..." ); + CD_SEARCH_TREE_LOG( "Popping trail ..." ); lastDecision = _decisions.back(); _context.pop(); _engine->postContextPopHook(); - SMT_LOG( Stringf( "to %d DONE", _context.getLevel() ).ascii() ); + CD_SEARCH_TREE_LOG( Stringf( "to %d DONE", _context.getLevel() ).ascii() ); return true; } -void CDSmtCore::interruptIfCompliantWithDebugSolution() +void CDSearchTreeHandler::interruptIfCompliantWithDebugSolution() { if ( checkSkewFromDebuggingSolution() ) { - SMT_LOG( "Error! Popping from a compliant stack\n" ); + CD_SEARCH_TREE_LOG( "Error! Popping from a compliant stack\n" ); throw MarabouError( MarabouError::DEBUGGING_ERROR ); } } -PiecewiseLinearCaseSplit CDSmtCore::getDecision( unsigned decisionLevel ) const +PiecewiseLinearCaseSplit CDSearchTreeHandler::getDecision( unsigned decisionLevel ) const { ASSERT( decisionLevel <= getDecisionLevel() ); ASSERT( decisionLevel > 0 ); return _decisions[decisionLevel - 1].getPiecewiseLinearCaseSplit(); } -bool CDSmtCore::backtrackToFeasibleDecision( TrailEntry &lastDecision ) +bool CDSearchTreeHandler::backtrackToFeasibleDecision( TrailEntry &lastDecision ) { - SMT_LOG( "Backtracking to a feasible decision..." ); + CD_SEARCH_TREE_LOG( "Backtracking to a feasible decision..." 
); if ( getDecisionLevel() == 0 ) return false; @@ -254,7 +255,7 @@ bool CDSmtCore::backtrackToFeasibleDecision( TrailEntry &lastDecision ) return true; } -bool CDSmtCore::backtrackAndContinueSearch() +bool CDSearchTreeHandler::backtrackAndContinueSearch() { TrailEntry feasibleDecision( nullptr, CONSTRAINT_INFEASIBLE ); struct timespec start = TimeUtils::sampleMicro(); @@ -280,7 +281,7 @@ bool CDSmtCore::backtrackAndContinueSearch() if ( level > _statistics->getUnsignedAttribute( Statistics::MAX_DECISION_LEVEL ) ) _statistics->setUnsignedAttribute( Statistics::MAX_DECISION_LEVEL, level ); struct timespec end = TimeUtils::sampleMicro(); - _statistics->incLongAttribute( Statistics::TOTAL_TIME_SMT_CORE_MICRO, + _statistics->incLongAttribute( Statistics::TOTAL_TIME_SEARCH_TREE_HANDLER_MICRO, TimeUtils::timePassed( start, end ) ); } @@ -288,14 +289,14 @@ bool CDSmtCore::backtrackAndContinueSearch() return true; } -void CDSmtCore::resetReportedViolations() +void CDSearchTreeHandler::resetReportedViolations() { _constraintToViolationCount.clear(); _numRejectedPhasePatternProposal = 0; _needToSplit = false; } -void CDSmtCore::allSplitsSoFar( List &result ) const +void CDSearchTreeHandler::allSplitsSoFar( List &result ) const { result.clear(); @@ -303,19 +304,19 @@ void CDSmtCore::allSplitsSoFar( List &result ) const result.append( trailEntry.getPiecewiseLinearCaseSplit() ); } -void CDSmtCore::setStatistics( Statistics *statistics ) +void CDSearchTreeHandler::setStatistics( Statistics *statistics ) { _statistics = statistics; } -void CDSmtCore::storeDebuggingSolution( const Map &debuggingSolution ) +void CDSearchTreeHandler::storeDebuggingSolution( const Map &debuggingSolution ) { _debuggingSolution = debuggingSolution; } // Return true if stack is currently compliant, false otherwise // If there is no stored solution, return false --- incompliant. 
-bool CDSmtCore::checkSkewFromDebuggingSolution() +bool CDSearchTreeHandler::checkSkewFromDebuggingSolution() { if ( _debuggingSolution.empty() ) return false; @@ -372,8 +373,8 @@ bool CDSmtCore::checkSkewFromDebuggingSolution() return true; } -bool CDSmtCore::splitAllowsStoredSolution( const PiecewiseLinearCaseSplit &split, - String &error ) const +bool CDSearchTreeHandler::splitAllowsStoredSolution( const PiecewiseLinearCaseSplit &split, + String &error ) const { // False if the split prevents one of the values in the stored solution, true otherwise. error = ""; @@ -416,12 +417,12 @@ bool CDSmtCore::splitAllowsStoredSolution( const PiecewiseLinearCaseSplit &split return true; } -void CDSmtCore::setConstraintViolationThreshold( unsigned threshold ) +void CDSearchTreeHandler::setConstraintViolationThreshold( unsigned threshold ) { _constraintViolationThreshold = threshold; } -PiecewiseLinearConstraint *CDSmtCore::chooseViolatedConstraintForFixing( +PiecewiseLinearConstraint *CDSearchTreeHandler::chooseViolatedConstraintForFixing( List &_violatedPlConstraints ) const { ASSERT( !_violatedPlConstraints.empty() ); @@ -455,14 +456,14 @@ PiecewiseLinearConstraint *CDSmtCore::chooseViolatedConstraintForFixing( return candidate; } -bool CDSmtCore::pickSplitPLConstraint() +bool CDSearchTreeHandler::pickSplitPLConstraint() { if ( _needToSplit ) _constraintForSplitting = _engine->pickSplitPLConstraint( _branchingHeuristic ); return _constraintForSplitting != NULL; } -void CDSmtCore::reset() +void CDSearchTreeHandler::reset() { _context.popto( 0 ); _engine->postContextPopHook(); diff --git a/src/engine/CDSmtCore.h b/src/engine/CDSearchTreeHandler.h similarity index 86% rename from src/engine/CDSmtCore.h rename to src/engine/CDSearchTreeHandler.h index 8cbeebc5c0..2c0d0d97e4 100644 --- a/src/engine/CDSmtCore.h +++ b/src/engine/CDSearchTreeHandler.h @@ -1,5 +1,5 @@ /********************* */ -/*! \file CDSmtCore.h +/*! 
\file CDSearchTreeHandler.h ** \verbatim ** Top contributors (to current version): ** Guy Katz, AleksandarZeljic, Haoze Wu, Parth Shah @@ -9,8 +9,8 @@ ** All rights reserved. See the file COPYING in the top-level source ** directory for licensing information.\endverbatim ** - ** The CDSmtCore class implements a context-dependent SmtCore class. - ** The CDSmtCore distinguishes between: **decisions** and **implications**. + ** The CDSearchTreeHandler class implements a context-dependent CDSearchTreeHandler class. + ** The CDSearchTreeHandler distinguishes between: **decisions** and **implications**. ** ** Decision is a case of PiecewiseLinearConstraint asserted on the trail. ** A decision is a choice between multiple feasible cases of a @@ -24,7 +24,7 @@ ** by exhausting all other cases) performs an implication. ** ** The overall search state is stored in a distributed way: - ** - CDSmtCore::_trail is the current search state in a chronological order + ** - CDSearchTreeHandler::_trail is the current search state in a chronological order ** - PiecewiseLinearConstraints' infeasible cases enumerate all the explored ** states w.r.t to the chronological order on the _trail. ** @@ -34,7 +34,7 @@ ** _trail and _decisions are both context dependent and will synchronize in ** unison with the _context object. ** - ** When a search state is found to be infeasible, CDSmtCore backtracks to the + ** When a search state is found to be infeasible, CDSearchTreeHandler backtracks to the ** last decision and continues the search. ** ** PushDecision advances the decision level and context level. @@ -63,8 +63,8 @@ ** - Using BoundManager class to store bounds in a context-dependent manner **/ -#ifndef __CDSmtCore_h__ -#define __CDSmtCore_h__ +#ifndef __CDSearchTreeHandler_h__ +#define __CDSearchTreeHandler_h__ #include "Options.h" #include "PLConstraintScoreTracker.h" @@ -76,17 +76,18 @@ #include "context/cdlist.h" #include "context/context.h" -#define SMT_LOG( x, ... 
) LOG( GlobalConfiguration::SMT_CORE_LOGGING, "CDSmtCore: %s\n", x ) +#define CD_SEARCH_TREE_LOG( x, ... ) \ + LOG( GlobalConfiguration::SEARCH_TREE_HANDLER_LOGGING, "CDSearchTreeHandler: %s\n", x ) class EngineState; class Engine; class String; -class CDSmtCore +class CDSearchTreeHandler { public: - CDSmtCore( IEngine *engine, CVC4::context::Context &context ); - ~CDSmtCore(); + CDSearchTreeHandler( IEngine *engine, CVC4::context::Context &context ); + ~CDSearchTreeHandler(); /* Clear the stack. @@ -99,7 +100,7 @@ class CDSmtCore void initializeScoreTrackerIfNeeded( const List &plConstraints ); /* - Inform the SMT core that a SoI phase pattern proposal is rejected. + Inform the Search Tree Handler that a SoI phase pattern proposal is rejected. */ void reportRejectedPhasePatternProposal(); @@ -121,7 +122,7 @@ class CDSmtCore } /* - Inform the SMT core that a PL constraint is violated. + Inform the Search Tree Handler that a PL constraint is violated. */ void reportViolatedConstraint( PiecewiseLinearConstraint *constraint ); @@ -137,7 +138,7 @@ class CDSmtCore void resetReportedViolations(); /* - Returns true iff the SMT core wants to perform a case split. + Returns true iff the Search Tree Handler wants to perform a case split. */ bool needToSplit() const; @@ -147,7 +148,7 @@ class CDSmtCore void pushDecision( PiecewiseLinearConstraint *constraint, PhaseStatus decision ); /* - Inform SmtCore of an implied (formerly valid) case split that was discovered. + Inform Search Tree Handler of an implied (formerly valid) case split that was discovered. */ void pushImplication( PiecewiseLinearConstraint *constraint ); @@ -196,7 +197,7 @@ class CDSmtCore unsigned getDecisionLevel() const; /* - Return a list of all splits performed so far, both SMT-originating and + Return a list of all splits performed so far, both Search Tree Handler-originating and valid ones, in the correct order. 
*/ void allSplitsSoFar( List &result ) const; @@ -218,12 +219,12 @@ class CDSmtCore }; /* - Have the SMT core start reporting statistics. + Have the Search Tree Handler start reporting statistics. */ void setStatistics( Statistics *statistics ); /* - Have the SMT core choose, among a set of violated PL constraints, which + Have the Search Tree Handler choose, among a set of violated PL constraints, which constraint should be repaired (without splitting) */ PiecewiseLinearConstraint *chooseViolatedConstraintForFixing( @@ -329,4 +330,4 @@ class CDSmtCore unsigned _numRejectedPhasePatternProposal; }; -#endif // __CDSmtCore_h__ +#endif // __CDSearchTreeHandler_h__ diff --git a/src/engine/CMakeLists.txt b/src/engine/CMakeLists.txt index 487d806d3c..6563c0517c 100644 --- a/src/engine/CMakeLists.txt +++ b/src/engine/CMakeLists.txt @@ -8,12 +8,12 @@ target_include_directories(${MARABOU_LIB} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}") target_sources(${MARABOU_TEST_LIB} PRIVATE ${SRCS}) target_include_directories(${MARABOU_TEST_LIB} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}") -set (ENGINE_TESTS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/tests") +set(ENGINE_TESTS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/tests") macro(engine_add_unit_test name) set(USE_MOCK_COMMON FALSE) set(USE_MOCK_ENGINE FALSE) marabou_add_test(${ENGINE_TESTS_DIR}/Test_${name} engine USE_MOCK_COMMON - USE_MOCK_ENGINE "unit") + USE_MOCK_ENGINE "unit") endmacro() engine_add_unit_test(AbsoluteValueConstraint) @@ -41,10 +41,10 @@ engine_add_unit_test(Query) engine_add_unit_test(ReluConstraint) engine_add_unit_test(RoundConstraint) engine_add_unit_test(RowBoundTightener) +engine_add_unit_test(SearchTreeHandler) engine_add_unit_test(SignConstraint) engine_add_unit_test(SigmoidConstraint) engine_add_unit_test(SoftmaxConstraint) -engine_add_unit_test(SmtCore) engine_add_unit_test(SumOfInfeasibilitiesManager) engine_add_unit_test(Tableau) engine_add_unit_test(BaBsrSplitting) diff --git a/src/engine/DnCWorker.cpp b/src/engine/DnCWorker.cpp index 
4765eae5f1..367ebe1b32 100644 --- a/src/engine/DnCWorker.cpp +++ b/src/engine/DnCWorker.cpp @@ -85,9 +85,9 @@ void DnCWorker::popOneSubQueryAndSolve( bool restoreTreeStates ) String queryId = subQuery->_queryId; unsigned depth = subQuery->_depth; auto split = std::move( subQuery->_split ); - std::unique_ptr smtState = nullptr; - if ( restoreTreeStates && subQuery->_smtState ) - smtState = std::move( subQuery->_smtState ); + std::unique_ptr searchTreeState = nullptr; + if ( restoreTreeStates && subQuery->_searchTreeState ) + searchTreeState = std::move( subQuery->_searchTreeState ); unsigned timeoutInSeconds = subQuery->_timeoutInSeconds; // Reset the engine state @@ -102,8 +102,8 @@ void DnCWorker::popOneSubQueryAndSolve( bool restoreTreeStates ) _engine->applySnCSplit( *split, queryId ); bool fullSolveNeeded = true; // denotes whether we need to solve the subquery - if ( restoreTreeStates && smtState ) - fullSolveNeeded = _engine->restoreSmtState( *smtState ); + if ( restoreTreeStates && searchTreeState ) + fullSolveNeeded = _engine->restoreSearchTreeState( *searchTreeState ); IEngine::ExitCode result = IEngine::NOT_DONE; if ( fullSolveNeeded ) { @@ -136,14 +136,15 @@ void DnCWorker::popOneSubQueryAndSolve( bool restoreTreeStates ) ? 
0 : (unsigned)timeoutInSeconds * _timeoutFactor ); unsigned numNewSubQueries = pow( 2, _onlineDivides ); - std::vector> newSmtStates; + std::vector> newSearchTreeStates; if ( restoreTreeStates ) { - // create |numNewSubQueries| copies of the current SmtState + // create |numNewSubQueries| copies of the current SearchTreeState for ( unsigned i = 0; i < numNewSubQueries; ++i ) { - newSmtStates.push_back( std::unique_ptr( new SmtState() ) ); - _engine->storeSmtState( *( newSmtStates[i] ) ); + newSearchTreeStates.push_back( + std::unique_ptr( new SearchTreeState() ) ); + _engine->storeSearchTreeState( *( newSearchTreeStates[i] ) ); } } @@ -153,10 +154,10 @@ void DnCWorker::popOneSubQueryAndSolve( bool restoreTreeStates ) unsigned i = 0; for ( auto &newSubQuery : subQueries ) { - // Store the SmtCore state + // Store the SearchTreeHandler state if ( restoreTreeStates ) { - newSubQuery->_smtState = std::move( newSmtStates[i++] ); + newSubQuery->_searchTreeState = std::move( newSearchTreeStates[i++] ); } if ( !_workload->push( std::move( newSubQuery ) ) ) diff --git a/src/engine/Engine.cpp b/src/engine/Engine.cpp index a3d0d82aeb..423a27f723 100644 --- a/src/engine/Engine.cpp +++ b/src/engine/Engine.cpp @@ -41,7 +41,7 @@ Engine::Engine() , _tableau( _boundManager ) , _preprocessedQuery( nullptr ) , _rowBoundTightener( *_tableau ) - , _smtCore( this ) + , _searchTreeHandler( this ) , _numPlConstraintsDisabledByValidSplits( 0 ) , _preprocessingEnabled( false ) , _initialStateStored( false ) @@ -73,7 +73,7 @@ Engine::Engine() , _groundBoundManager( _context ) , _UNSATCertificate( NULL ) { - _smtCore.setStatistics( &_statistics ); + _searchTreeHandler.setStatistics( &_statistics ); _tableau->setStatistics( &_statistics ); _rowBoundTightener->setStatistics( &_statistics ); _preprocessor.setStatistics( &_statistics ); @@ -137,7 +137,7 @@ void Engine::applySnCSplit( PiecewiseLinearCaseSplit sncSplit, String queryId ) _sncSplit = sncSplit; _queryId = queryId; 
preContextPushHook(); - _smtCore.pushContext(); + _searchTreeHandler.pushContext(); applySplit( sncSplit ); _boundManager.propagateTightenings(); } @@ -289,7 +289,7 @@ bool Engine::solve( double timeoutInSeconds ) } } - // If true, we just entered a new subproblem + // If true, we just entered a new sub-problem if ( splitJustPerformed ) { performBoundTighteningAfterCaseSplit(); @@ -297,10 +297,9 @@ bool Engine::solve( double timeoutInSeconds ) splitJustPerformed = false; } - // Perform any SmtCore-initiated case splits - if ( _smtCore.needToSplit() ) + if ( _searchTreeHandler.needToSplit() ) { - _smtCore.performSplit(); + _searchTreeHandler.performSplit(); splitJustPerformed = true; continue; } @@ -363,8 +362,8 @@ bool Engine::solve( double timeoutInSeconds ) } else { - while ( !_smtCore.needToSplit() ) - _smtCore.reportRejectedPhasePatternProposal(); + while ( !_searchTreeHandler.needToSplit() ) + _searchTreeHandler.reportRejectedPhasePatternProposal(); continue; } } @@ -409,7 +408,7 @@ bool Engine::solve( double timeoutInSeconds ) if ( _produceUNSATProofs ) explainSimplexFailure(); - if ( !_smtCore.popSplit() ) + if ( !_searchTreeHandler.popSplit() ) { mainLoopEnd = TimeUtils::sampleMicro(); _statistics.incLongAttribute( Statistics::TIME_MAIN_LOOP_MICRO, @@ -467,7 +466,7 @@ void Engine::mainLoopStatistics() _statistics.setUnsignedAttribute( Statistics::NUM_ACTIVE_PL_CONSTRAINTS, activeConstraints ); _statistics.setUnsignedAttribute( Statistics::NUM_PL_VALID_SPLITS, _numPlConstraintsDisabledByValidSplits ); - _statistics.setUnsignedAttribute( Statistics::NUM_PL_SMT_ORIGINATED_SPLITS, + _statistics.setUnsignedAttribute( Statistics::NUM_PL_SEARCH_TREE_ORIGINATED_SPLITS, _plConstraints.size() - activeConstraints - _numPlConstraintsDisabledByValidSplits ); @@ -565,7 +564,7 @@ bool Engine::performPrecisionRestorationIfNeeded() // Restoration is not required _basisRestorationPerformed = Engine::NO_RESTORATION_PERFORMED; - // Possible restoration due to preceision 
degradation + // Possible restoration due to precision degradation if ( shouldCheckDegradation() && highDegradation() ) { performPrecisionRestoration( PrecisionRestorer::RESTORE_BASICS ); @@ -613,7 +612,7 @@ void Engine::performConstraintFixingStep() // Select a violated constraint as the target selectViolatedPlConstraint(); - // Report the violated constraint to the SMT engine + // Report the violated constraint to the Search Tree engine reportPlViolation(); // Attempt to fix the constraint @@ -1114,7 +1113,7 @@ void Engine::selectInitialVariablesForBasis( const double *constraintMatrix, /* This method permutes rows and columns in the constraint matrix (prior to the addition of auxiliary variables), in order to obtain a set of - column that constitue a lower triangular matrix. The variables + column that constitute a lower triangular matrix. The variables corresponding to the columns of this matrix join the initial basis. (It is possible that not enough variables are obtained this way, in which @@ -1339,7 +1338,7 @@ void Engine::initializeTableau( const double *constraintMatrix, const ListsetConstraintMatrix( constraintMatrix ); _tableau->registerToWatchAllVariables( _rowBoundTightener ); @@ -1512,7 +1511,11 @@ bool Engine::processInputQuery( const IQuery &inputQuery, bool preprocess ) } for ( const auto &constraint : _plConstraints ) + { constraint->registerTableau( _tableau ); + if ( !Options::get()->getBool( Options::DNC_MODE ) ) + constraint->initializeCDOs( &_context ); + } for ( const auto &constraint : _nlConstraints ) constraint->registerTableau( _tableau ); @@ -1563,7 +1566,7 @@ bool Engine::processInputQuery( const IQuery &inputQuery, bool preprocess ) } } ); - _smtCore.storeDebuggingSolution( _preprocessedQuery->_debuggingSolution ); + _searchTreeHandler.storeDebuggingSolution( _preprocessedQuery->_debuggingSolution ); return true; } @@ -1571,7 +1574,7 @@ void Engine::performMILPSolverBoundedTightening( Query *inputQuery ) { if ( _networkLevelReasoner 
&& Options::get()->gurobiEnabled() ) { - // Obtain from and store bounds into inputquery if it is not null. + // Obtain from and store bounds into inputQuery if it is not null. if ( inputQuery ) _networkLevelReasoner->obtainCurrentBounds( *inputQuery ); else @@ -1814,14 +1817,15 @@ void Engine::selectViolatedPlConstraint() { ASSERT( !_violatedPlConstraints.empty() ); - _plConstraintToFix = _smtCore.chooseViolatedConstraintForFixing( _violatedPlConstraints ); + _plConstraintToFix = + _searchTreeHandler.chooseViolatedConstraintForFixing( _violatedPlConstraints ); ASSERT( _plConstraintToFix ); } void Engine::reportPlViolation() { - _smtCore.reportViolatedConstraint( _plConstraintToFix ); + _searchTreeHandler.reportViolatedConstraint( _plConstraintToFix ); } void Engine::storeState( EngineState &state, TableauStateStorageLevel level ) const @@ -1830,7 +1834,8 @@ void Engine::storeState( EngineState &state, TableauStateStorageLevel level ) co state._tableauStateStorageLevel = level; for ( const auto &constraint : _plConstraints ) - state._plConstraintToState[constraint] = constraint->duplicateConstraint(); + if ( !constraint->getContext() ) + state._plConstraintToState[constraint] = constraint->duplicateConstraint(); state._numPlConstraintsDisabledByValidSplits = _numPlConstraintsDisabledByValidSplits; } @@ -1848,10 +1853,11 @@ void Engine::restoreState( const EngineState &state ) ENGINE_LOG( "\tRestoring constraint states" ); for ( auto &constraint : _plConstraints ) { - if ( !state._plConstraintToState.exists( constraint ) ) + if ( !state._plConstraintToState.exists( constraint ) && !constraint->getContext() ) throw MarabouError( MarabouError::MISSING_PL_CONSTRAINT_STATE ); - constraint->restoreState( state._plConstraintToState[constraint] ); + if ( !constraint->getContext() ) + constraint->restoreState( state._plConstraintToState[constraint] ); } _numPlConstraintsDisabledByValidSplits = state._numPlConstraintsDisabledByValidSplits; @@ -1865,8 +1871,8 @@ void 
Engine::restoreState( const EngineState &state ) _costFunctionManager->initialize(); } - // Reset the violation counts in the SMT core - _smtCore.resetSplitConditions(); + // Reset the violation counts in the Search Tree handler + _searchTreeHandler.resetSplitConditions(); } void Engine::setNumPlConstraintsDisabledByValidSplits( unsigned numConstraints ) @@ -2183,7 +2189,7 @@ bool Engine::applyValidConstraintCaseSplit( PiecewiseLinearConstraint *constrain constraint->setActiveConstraint( false ); PiecewiseLinearCaseSplit validSplit = constraint->getValidCaseSplit(); - _smtCore.recordImpliedValidSplit( validSplit ); + _searchTreeHandler.recordImpliedValidSplit( validSplit ); applySplit( validSplit ); if ( _soiManager ) @@ -2232,7 +2238,7 @@ void Engine::tightenBoundsOnConstraintMatrix() struct timespec start = TimeUtils::sampleMicro(); if ( _statistics.getLongAttribute( Statistics::NUM_MAIN_LOOP_ITERATIONS ) % - GlobalConfiguration::BOUND_TIGHTING_ON_CONSTRAINT_MATRIX_FREQUENCY == + GlobalConfiguration::BOUND_TIGHTENING_ON_CONSTRAINT_MATRIX_FREQUENCY == 0 ) { _rowBoundTightener->examineConstraintMatrix( true ); @@ -2279,7 +2285,7 @@ void Engine::performPrecisionRestoration( PrecisionRestorer::RestoreBasics resto double before = _degradationChecker.computeDegradation( *_tableau ); // - _precisionRestorer.restorePrecision( *this, *_tableau, _smtCore, restoreBasics ); + _precisionRestorer.restorePrecision( *this, *_tableau, _searchTreeHandler, restoreBasics ); struct timespec end = TimeUtils::sampleMicro(); _statistics.incLongAttribute( Statistics::TOTAL_TIME_PRECISION_RESTORATION, TimeUtils::timePassed( start, end ) ); @@ -2300,7 +2306,7 @@ void Engine::performPrecisionRestoration( PrecisionRestorer::RestoreBasics resto // Try again! 
start = TimeUtils::sampleMicro(); _precisionRestorer.restorePrecision( - *this, *_tableau, _smtCore, PrecisionRestorer::DO_NOT_RESTORE_BASICS ); + *this, *_tableau, _searchTreeHandler, PrecisionRestorer::DO_NOT_RESTORE_BASICS ); end = TimeUtils::sampleMicro(); _statistics.incLongAttribute( Statistics::TOTAL_TIME_PRECISION_RESTORATION, TimeUtils::timePassed( start, end ) ); @@ -2346,7 +2352,7 @@ Query *Engine::getQuery() void Engine::checkBoundCompliancyWithDebugSolution() { - if ( _smtCore.checkSkewFromDebuggingSolution() ) + if ( _searchTreeHandler.checkSkewFromDebuggingSolution() ) { // The stack is compliant, we should not have learned any non-compliant bounds for ( const auto &var : _preprocessedQuery->_debuggingSolution ) @@ -2441,7 +2447,7 @@ unsigned Engine::performSymbolicBoundTightening( Query *inputQuery ) // Step 1: tell the NLR about the current bounds if ( inputQuery ) { - // Obtain from and store bounds into inputquery if it is not null. + // Obtain from and store bounds into inputQuery if it is not null. 
_networkLevelReasoner->obtainCurrentBounds( *inputQuery ); } else @@ -2552,16 +2558,15 @@ void Engine::reset() resetStatistics(); _sncMode = false; clearViolatedPLConstraints(); - resetSmtCore(); + resetSearchTreeHandler(); resetBoundTighteners(); - resetExitCode(); } void Engine::resetStatistics() { Statistics statistics; _statistics = statistics; - _smtCore.setStatistics( &_statistics ); + _searchTreeHandler.setStatistics( &_statistics ); _tableau->setStatistics( &_statistics ); _rowBoundTightener->setStatistics( &_statistics ); _preprocessor.setStatistics( &_statistics ); @@ -2576,10 +2581,10 @@ void Engine::clearViolatedPLConstraints() _plConstraintToFix = NULL; } -void Engine::resetSmtCore() +void Engine::resetSearchTreeHandler() { - _smtCore.reset(); - _smtCore.initializeScoreTrackerIfNeeded( _plConstraints ); + _searchTreeHandler.reset(); + _searchTreeHandler.initializeScoreTrackerIfNeeded( _plConstraints ); } void Engine::resetExitCode() @@ -2691,9 +2696,9 @@ void Engine::decideBranchingHeuristics() _preprocessedQuery->getInputVariables().size() < GlobalConfiguration::INTERVAL_SPLITTING_THRESHOLD ) { - // NOTE: the benefit of input splitting is minimal with abstract intepretation disabled. - // Therefore, since the proof production mode does not currently support that, we do - // not perform input-splitting in proof production mode. + // NOTE: the benefit of input splitting is minimal with abstract interpretation + // disabled. Therefore, since the proof production mode does not currently support that, + // we do not perform input-splitting in proof production mode. 
divideStrategy = DivideStrategy::LargestInterval; if ( _verbosity >= 2 ) printf( "Branching heuristics set to LargestInterval\n" ); @@ -2715,8 +2720,8 @@ void Engine::decideBranchingHeuristics() } } ASSERT( divideStrategy != DivideStrategy::Auto ); - _smtCore.setBranchingHeuristics( divideStrategy ); - _smtCore.initializeScoreTrackerIfNeeded( _plConstraints ); + _searchTreeHandler.setBranchingHeuristics( divideStrategy ); + _searchTreeHandler.initializeScoreTrackerIfNeeded( _plConstraints ); } PiecewiseLinearConstraint *Engine::pickSplitPLConstraintBasedOnBaBsrHeuristic() @@ -2888,8 +2893,8 @@ PiecewiseLinearConstraint *Engine::pickSplitPLConstraint( DivideStrategy strateg PiecewiseLinearConstraint *candidatePLConstraint = NULL; if ( strategy == DivideStrategy::PseudoImpact ) { - if ( _smtCore.getStackDepth() > 3 ) - candidatePLConstraint = _smtCore.getConstraintsWithHighestScore(); + if ( _context.getLevel() > 3 ) + candidatePLConstraint = _searchTreeHandler.getConstraintsWithHighestScore(); else if ( !_preprocessedQuery->getInputVariables().empty() && _preprocessedQuery->getInputVariables().size() < GlobalConfiguration::INTERVAL_SPLITTING_THRESHOLD ) @@ -2898,7 +2903,7 @@ PiecewiseLinearConstraint *Engine::pickSplitPLConstraint( DivideStrategy strateg { candidatePLConstraint = pickSplitPLConstraintBasedOnPolarity(); if ( candidatePLConstraint == NULL ) - candidatePLConstraint = _smtCore.getConstraintsWithHighestScore(); + candidatePLConstraint = _searchTreeHandler.getConstraintsWithHighestScore(); } } else if ( strategy == DivideStrategy::BaBSR ) @@ -2908,8 +2913,7 @@ PiecewiseLinearConstraint *Engine::pickSplitPLConstraint( DivideStrategy strateg else if ( strategy == DivideStrategy::EarliestReLU ) candidatePLConstraint = pickSplitPLConstraintBasedOnTopology(); else if ( strategy == DivideStrategy::LargestInterval && - ( ( _smtCore.getStackDepth() + 1 ) % - GlobalConfiguration::INTERVAL_SPLITTING_FREQUENCY != + ( ( _context.getLevel() + 1 ) % 
GlobalConfiguration::INTERVAL_SPLITTING_FREQUENCY != 0 ) ) { // Conduct interval splitting periodically. @@ -2938,17 +2942,17 @@ PiecewiseLinearConstraint *Engine::pickSplitPLConstraintSnC( SnCDivideStrategy s return candidatePLConstraint; } -bool Engine::restoreSmtState( SmtState &smtState ) +bool Engine::restoreSearchTreeState( SearchTreeState &searchTreeState ) { try { - ASSERT( _smtCore.getStackDepth() == 0 ); + ASSERT( _searchTreeHandler.getStackDepth() == 0 ); // Step 1: all implied valid splits at root - for ( auto &validSplit : smtState._impliedValidSplitsAtRoot ) + for ( auto &validSplit : searchTreeState._impliedValidSplitsAtRoot ) { applySplit( validSplit ); - _smtCore.recordImpliedValidSplit( validSplit ); + _searchTreeHandler.recordImpliedValidSplit( validSplit ); } tightenBoundsOnConstraintMatrix(); @@ -2960,11 +2964,11 @@ bool Engine::restoreSmtState( SmtState &smtState ) while ( applyAllValidConstraintCaseSplits() ); // Step 2: replay the stack - for ( auto &stackEntry : smtState._stack ) + for ( auto &stackEntry : searchTreeState._stack ) { - _smtCore.replaySmtStackEntry( stackEntry ); + _searchTreeHandler.replaySearchTreeStackEntry( stackEntry ); // Do all the bound propagation, and set ReLU constraints to inactive (at - // least the one corresponding to the _activeSplit applied above. + // least the one corresponding to the _activeSplit applied above). 
tightenBoundsOnConstraintMatrix(); // For debugging purposes @@ -2982,7 +2986,7 @@ bool Engine::restoreSmtState( SmtState &smtState ) if ( _produceUNSATProofs ) explainSimplexFailure(); - if ( !_smtCore.popSplit() ) + if ( !_searchTreeHandler.popSplit() ) { if ( _verbosity > 0 ) { @@ -2998,9 +3002,9 @@ bool Engine::restoreSmtState( SmtState &smtState ) return true; } -void Engine::storeSmtState( SmtState &smtState ) +void Engine::storeSearchTreeState( SearchTreeState &searchTreeState ) { - _smtCore.storeSmtState( smtState ); + _searchTreeHandler.storeSearchTreeState( searchTreeState ); } bool Engine::solveWithMILPEncoding( double timeoutInSeconds ) @@ -3087,8 +3091,8 @@ bool Engine::performDeepSoILocalSearch() if ( initialPhasePattern.isZero() ) { if ( hasBranchingCandidate() ) - while ( !_smtCore.needToSplit() ) - _smtCore.reportRejectedPhasePatternProposal(); + while ( !_searchTreeHandler.needToSplit() ) + _searchTreeHandler.reportRejectedPhasePatternProposal(); return false; } @@ -3102,7 +3106,7 @@ bool Engine::performDeepSoILocalSearch() double costOfProposedPhasePattern = FloatUtils::infinity(); bool lastProposalAccepted = true; - while ( !_smtCore.needToSplit() ) + while ( !_searchTreeHandler.needToSplit() ) { struct timespec end = TimeUtils::sampleMicro(); _statistics.incLongAttribute( Statistics::TOTAL_TIME_LOCAL_SEARCH_MICRO, @@ -3151,8 +3155,8 @@ bool Engine::performDeepSoILocalSearch() // the SoI with the hope to branch on them early. 
bumpUpPseudoImpactOfPLConstraintsNotInSoI(); if ( hasBranchingCandidate() ) - while ( !_smtCore.needToSplit() ) - _smtCore.reportRejectedPhasePatternProposal(); + while ( !_searchTreeHandler.needToSplit() ) + _searchTreeHandler.reportRejectedPhasePatternProposal(); return false; } } @@ -3181,7 +3185,7 @@ bool Engine::performDeepSoILocalSearch() } else { - _smtCore.reportRejectedPhasePatternProposal(); + _searchTreeHandler.reportRejectedPhasePatternProposal(); lastProposalAccepted = false; } } @@ -3260,7 +3264,7 @@ void Engine::updatePseudoImpactWithSoICosts( double costOfLastAcceptedPhasePatte ASSERT( constraintsUpdated.size() > 0 ); // Update the Pseudo-Impact estimation. for ( const auto &constraint : constraintsUpdated ) - _smtCore.updatePLConstraintScore( constraint, score ); + _searchTreeHandler.updatePLConstraintScore( constraint, score ); } void Engine::bumpUpPseudoImpactOfPLConstraintsNotInSoI() @@ -3270,7 +3274,7 @@ void Engine::bumpUpPseudoImpactOfPLConstraintsNotInSoI() { if ( plConstraint->isActive() && !plConstraint->supportSoI() && !plConstraint->phaseFixed() && !plConstraint->satisfied() ) - _smtCore.updatePLConstraintScore( + _searchTreeHandler.updatePLConstraintScore( plConstraint, GlobalConfiguration::SCORE_BUMP_FOR_PL_CONSTRAINTS_NOT_IN_SOI ); } } @@ -3406,7 +3410,7 @@ bool Engine::shouldProduceProofs() const void Engine::explainSimplexFailure() { - ASSERT( _produceUNSATProofs ); + ASSERT( _produceUNSATProofs && _lpSolverType == LPSolverType::NATIVE ); DEBUG( checkGroundBounds() ); @@ -3434,9 +3438,12 @@ void Engine::explainSimplexFailure() ASSERT( infeasibleVar < _tableau->getN() ); ASSERT( _UNSATCertificateCurrentPointer && !( **_UNSATCertificateCurrentPointer ).getContradiction() ); + _statistics.incUnsignedAttribute( Statistics::NUM_CERTIFIED_LEAVES ); - writeContradictionToCertificate( infeasibleVar ); + Vector leafContradictionVec = computeContradiction( infeasibleVar ); + + writeContradictionToCertificate( leafContradictionVec, 
infeasibleVar ); ( **_UNSATCertificateCurrentPointer ).makeLeaf(); } @@ -3690,7 +3697,7 @@ const UnsatCertificateNode *Engine::getUNSATCertificateRoot() const bool Engine::certifyUNSATCertificate() { - ASSERT( _produceUNSATProofs && _UNSATCertificate && !_smtCore.getStackDepth() ); + ASSERT( _produceUNSATProofs && _UNSATCertificate && !_searchTreeHandler.getStackDepth() ); for ( auto &constraint : _plConstraints ) { @@ -3704,6 +3711,7 @@ bool Engine::certifyUNSATCertificate() } } + _UNSATCertificateCurrentPointer->get()->deleteUnusedLemmas(); struct timespec certificationStart = TimeUtils::sampleMicro(); _precisionRestorer.restoreInitialEngineState( *this ); @@ -3775,7 +3783,7 @@ void Engine::markLeafToDelegate() // Mark leaf with toDelegate Flag UnsatCertificateNode *currentUnsatCertificateNode = _UNSATCertificateCurrentPointer->get(); ASSERT( _UNSATCertificateCurrentPointer && !currentUnsatCertificateNode->getContradiction() ); - currentUnsatCertificateNode->setDelegationStatus( DelegationStatus::DELEGATE_SAVE ); + currentUnsatCertificateNode->setDelegationStatus( DelegationStatus::DELEGATE_DONT_SAVE ); currentUnsatCertificateNode->deletePLCExplanations(); _statistics.incUnsignedAttribute( Statistics::NUM_DELEGATED_LEAVES ); @@ -3813,15 +3821,13 @@ const Vector Engine::computeContradiction( unsigned infeasibleVar ) cons return contradiction; } -void Engine::writeContradictionToCertificate( unsigned infeasibleVar ) const +void Engine::writeContradictionToCertificate( const Vector &contradiction, + unsigned infeasibleVar ) const { ASSERT( _produceUNSATProofs ); - Vector leafContradictionVec = computeContradiction( infeasibleVar ); - - Contradiction *leafContradiction = leafContradictionVec.empty() - ? new Contradiction( infeasibleVar ) - : new Contradiction( leafContradictionVec ); + Contradiction *leafContradiction = contradiction.empty() ? 
new Contradiction( infeasibleVar ) + : new Contradiction( contradiction ); ( **_UNSATCertificateCurrentPointer ).setContradiction( leafContradiction ); } @@ -3882,12 +3888,17 @@ void Engine::extractBounds( IQuery &inputQuery ) } } -void Engine::addPLCLemma( std::shared_ptr &explanation ) +void Engine::incNumOfLemmas() { if ( !_produceUNSATProofs ) return; - ASSERT( explanation && _UNSATCertificate && _UNSATCertificateCurrentPointer ) + ASSERT( _UNSATCertificate && _UNSATCertificateCurrentPointer ) _statistics.incUnsignedAttribute( Statistics::NUM_LEMMAS ); - _UNSATCertificateCurrentPointer->get()->addPLCLemma( explanation ); + _statistics.incUnsignedAttribute( Statistics::NUM_LEMMAS_USED ); } + +const List *Engine::getPiecewiseLinearConstraints() const +{ + return &_plConstraints; +} \ No newline at end of file diff --git a/src/engine/Engine.h b/src/engine/Engine.h index a3ea1c22d3..6a38994d05 100644 --- a/src/engine/Engine.h +++ b/src/engine/Engine.h @@ -39,8 +39,8 @@ #include "PrecisionRestorer.h" #include "Preprocessor.h" #include "Query.h" +#include "SearchTreeHandler.h" #include "SignalHandler.h" -#include "SmtCore.h" #include "SmtLibWriter.h" #include "SnCDivideStrategy.h" #include "SparseUnsortedList.h" @@ -83,7 +83,7 @@ class Engine Attempt to find a feasible solution for the input within a time limit (a timeout of 0 means no time limit). Returns true if found, false if infeasible. */ - bool solve( double timeoutInSeconds = 0 ); + bool solve( double timeoutInSeconds = 0 ) override; /* Minimize the cost function with respect to the current set of linear constraints. @@ -125,9 +125,9 @@ class Engine /* Methods for storing and restoring the state of the engine. 
*/ - void storeState( EngineState &state, TableauStateStorageLevel level ) const; - void restoreState( const EngineState &state ); - void setNumPlConstraintsDisabledByValidSplits( unsigned numConstraints ); + void storeState( EngineState &state, TableauStateStorageLevel level ) const override; + void restoreState( const EngineState &state ) override; + void setNumPlConstraintsDisabledByValidSplits( unsigned numConstraints ) override; /* Preprocessor access. @@ -138,7 +138,7 @@ class Engine /* A request from the user to terminate */ - void quitSignal(); + void quitSignal() override; const Statistics *getStatistics() const; @@ -149,7 +149,7 @@ class Engine /* Get the exit code */ - Engine::ExitCode getExitCode() const; + Engine::ExitCode getExitCode() const override; /* Get the quitRequested flag @@ -159,24 +159,24 @@ class Engine /* Get the list of input variables */ - List getInputVariables() const; + List getInputVariables() const override; /* Add equations and tightenings from a split. */ - void applySplit( const PiecewiseLinearCaseSplit &split ); + void applySplit( const PiecewiseLinearCaseSplit &split ) override; /* Hooks invoked before/after context push/pop to store/restore/update context independent data. */ - void postContextPopHook(); - void preContextPushHook(); + void postContextPopHook() override; + void preContextPushHook() override; /* Reset the state of the engine, before solving a new query (as part of DnC mode). */ - void reset(); + void reset() override; /* Reset the statistics object @@ -194,41 +194,41 @@ class Engine void setVerbosity( unsigned verbosity ); /* - Apply the stack to the newly created SmtCore, returns false if UNSAT is + Apply the stack to the newly created SearchTreeHandler, returns false if UNSAT is found in this process. 
*/ - bool restoreSmtState( SmtState &smtState ); + bool restoreSearchTreeState( SearchTreeState &searchTreeState ) override; /* - Store the current stack of the smtCore into smtState + Store the current stack of the searchTreeHandler into searchTreeState */ - void storeSmtState( SmtState &smtState ); + void storeSearchTreeState( SearchTreeState &searchTreeState ) override; /* Pick the piecewise linear constraint for splitting */ - PiecewiseLinearConstraint *pickSplitPLConstraint( DivideStrategy strategy ); + PiecewiseLinearConstraint *pickSplitPLConstraint( DivideStrategy strategy ) override; /* Call-back from QueryDividers Pick the piecewise linear constraint for splitting */ - PiecewiseLinearConstraint *pickSplitPLConstraintSnC( SnCDivideStrategy strategy ); + PiecewiseLinearConstraint *pickSplitPLConstraintSnC( SnCDivideStrategy strategy ) override; /* PSA: The following two methods are for DnC only and should be used very cautiously. */ - void resetSmtCore(); void resetExitCode(); + void resetSearchTreeHandler(); void resetBoundTighteners(); /* Register initial split when in SnC mode */ - void applySnCSplit( PiecewiseLinearCaseSplit sncSplit, String queryId ); + void applySnCSplit( PiecewiseLinearCaseSplit sncSplit, String queryId ) override; - bool inSnCMode() const; + bool inSnCMode() const override; /* Apply bound tightenings stored in the bound manager. @@ -239,71 +239,76 @@ class Engine Apply all bound tightenings (row and matrix-based) in the queue. */ - void applyAllBoundTightenings(); + void applyAllBoundTightenings() override; /* Apply all valid case splits proposed by the constraints. Return true if a valid case split has been applied. 
*/ - bool applyAllValidConstraintCaseSplits(); + bool applyAllValidConstraintCaseSplits() override; void setRandomSeed( unsigned seed ); /* Returns true iff the engine is in proof production mode */ - bool shouldProduceProofs() const; + bool shouldProduceProofs() const override; /* Update the ground bounds */ - void updateGroundUpperBound( unsigned var, double value ); - void updateGroundLowerBound( unsigned var, double value ); + void updateGroundUpperBound( unsigned var, double value ) override; + void updateGroundLowerBound( unsigned var, double value ) override; /* Return all ground bounds as a vector */ - double getGroundBound( unsigned var, bool isUpper ) const; + double getGroundBound( unsigned var, bool isUpper ) const override; /* Get the current pointer of the UNSAT certificate */ - UnsatCertificateNode *getUNSATCertificateCurrentPointer() const; + UnsatCertificateNode *getUNSATCertificateCurrentPointer() const override; /* Set the current pointer of the UNSAT certificate */ - void setUNSATCertificateCurrentPointer( UnsatCertificateNode *node ); + void setUNSATCertificateCurrentPointer( UnsatCertificateNode *node ) override; /* Get the pointer to the root of the UNSAT certificate */ - const UnsatCertificateNode *getUNSATCertificateRoot() const; + const UnsatCertificateNode *getUNSATCertificateRoot() const override; /* Certify the UNSAT certificate */ - bool certifyUNSATCertificate(); + bool certifyUNSATCertificate() override; /* Get the boundExplainer */ - const BoundExplainer *getBoundExplainer() const; + const BoundExplainer *getBoundExplainer() const override; /* Set the boundExplainer */ - void setBoundExplainerContent( BoundExplainer *boundExplainer ); + void setBoundExplainerContent( BoundExplainer *boundExplainer ) override; /* Propagate bound tightenings stored in the BoundManager */ - void propagateBoundManagerTightenings(); + void propagateBoundManagerTightenings() override; /* Add lemma to the UNSAT Certificate */ - void addPLCLemma( 
std::shared_ptr &explanation ); + void incNumOfLemmas() override; + + /* + For debugging purpose + */ + const List *getPiecewiseLinearConstraints() const override; private: enum BasisRestorationRequired { @@ -327,7 +332,7 @@ class Engine /* Context is the central object that manages memory and back-tracking - across context-dependent components - SMTCore, + across context-dependent components - SearchTreeHandler, PiecewiseLinearConstraints, BoundManager, etc. */ Context _context; @@ -387,9 +392,9 @@ class Engine AutoRowBoundTightener _rowBoundTightener; /* - The SMT engine is in charge of case splitting. + The Search Tree engine is in charge of case splitting. */ - SmtCore _smtCore; + SearchTreeHandler _searchTreeHandler; /* Number of pl constraints disabled by valid splits. @@ -592,7 +597,7 @@ class Engine void selectViolatedPlConstraint(); /* - Report the violated PL constraint to the SMT engine. + Report the violated PL constraint to the Search Tree engine. */ void reportPlViolation(); @@ -817,15 +822,15 @@ class Engine /* Get Context reference */ - Context &getContext() + Context &getContext() override { return _context; } /* - Checks whether the current bounds are consistent. Exposed for the SmtCore. + Checks whether the current bounds are consistent. Exposed for the SearchTreeHandler. 
*/ - bool consistentBounds() const; + bool consistentBounds() const override; /* DEBUG only @@ -834,7 +839,7 @@ class Engine void checkGurobiBoundConsistency() const; /* - Proof Production data structes + Proof Production data structures */ bool _produceUNSATProofs; @@ -850,7 +855,7 @@ class Engine /* Returns the value of a variable bound, as explained by the BoundExplainer */ - double explainBound( unsigned var, bool isUpper ) const; + double explainBound( unsigned var, bool isUpper ) const override; /* Returns true iff both bounds are epsilon close to their explained bounds @@ -865,7 +870,7 @@ class Engine /* Finds the variable causing failure and updates its bounds explanations */ - void explainSimplexFailure(); + void explainSimplexFailure() override; /* Sanity check for ground bounds, returns true iff all bounds are at least as tight as their @@ -904,7 +909,8 @@ class Engine /* Writes the details of a contradiction to the UNSAT certificate node */ - void writeContradictionToCertificate( unsigned infeasibleVar ) const; + void writeContradictionToCertificate( const Vector &contradiction, + unsigned infeasibleVar ) const; }; #endif // __Engine_h__ diff --git a/src/engine/EngineState.h b/src/engine/EngineState.h index 3e174bac02..b423173f12 100644 --- a/src/engine/EngineState.h +++ b/src/engine/EngineState.h @@ -42,7 +42,7 @@ class EngineState /* A unique ID allocated to every state that is stored, for - debugging purposes. These are assigned by the SMT core. + debugging purposes. These are assigned by the Search Tree handler. 
*/ unsigned _stateId; }; diff --git a/src/engine/IBoundManager.h b/src/engine/IBoundManager.h index f2aad70ce5..8731261b26 100644 --- a/src/engine/IBoundManager.h +++ b/src/engine/IBoundManager.h @@ -27,7 +27,6 @@ #define __IBoundManager_h__ #include "List.h" -#include "PiecewiseLinearFunctionType.h" #include "Tightening.h" #include "Vector.h" @@ -38,6 +37,7 @@ class SparseUnsortedList; class TableauRow; class ITableau; class IRowBoundTightener; +class PiecewiseLinearConstraint; class IBoundManager { public: @@ -127,13 +127,14 @@ class IBoundManager Add a lemma to the UNSATCertificateNode object Return true iff adding the lemma was successful */ - virtual bool - addLemmaExplanationAndTightenBound( unsigned var, - double value, - Tightening::BoundType affectedVarBound, - const List &causingVars, - Tightening::BoundType causingVarBound, - PiecewiseLinearFunctionType constraintType ) = 0; + virtual bool addLemmaExplanationAndTightenBound( unsigned var, + double value, + Tightening::BoundType affectedVarBound, + const List &causingVars, + Tightening::BoundType causingVarBound, + PiecewiseLinearConstraint &constraint, + bool isPhaseFixing = false, + double minTargetBound = 0 ) = 0; /* Return the content of the object containing all explanations for variable bounds in the diff --git a/src/engine/IEngine.h b/src/engine/IEngine.h index ea592bb4e5..a97f6c316a 100644 --- a/src/engine/IEngine.h +++ b/src/engine/IEngine.h @@ -32,10 +32,10 @@ class EngineState; class Equation; class PiecewiseLinearCaseSplit; -class PLCLemma; -class SmtState; +class SearchTreeState; class String; class PiecewiseLinearConstraint; +class PLCLemma; class UnsatCertificateNode; class IEngine @@ -79,15 +79,15 @@ class IEngine virtual void setNumPlConstraintsDisabledByValidSplits( unsigned numConstraints ) = 0; /* - Store the current stack of the smtCore into smtState + Store the current stack of the searchTreeHandler into searchTreeState */ - virtual void storeSmtState( SmtState &smtState ) = 0; + 
virtual void storeSearchTreeState( SearchTreeState &searchTreeState ) = 0; /* - Apply the stack to the newly created SmtCore, returns false if UNSAT is + Apply the stack to the newly created SearchTreeHandler, returns false if UNSAT is found in this process. */ - virtual bool restoreSmtState( SmtState &smtState ) = 0; + virtual bool restoreSearchTreeState( SearchTreeState &searchTreeState ) = 0; /* Solve the encoded query. @@ -122,8 +122,8 @@ class IEngine virtual double explainBound( unsigned var, bool isUpper ) const = 0; /* - * Update the ground bounds - */ + Update the ground bounds + */ virtual void updateGroundUpperBound( unsigned var, double value ) = 0; virtual void updateGroundLowerBound( unsigned var, double value ) = 0; @@ -190,7 +190,9 @@ class IEngine /* Add lemma to the UNSAT Certificate */ - virtual void addPLCLemma( std::shared_ptr &explanation ) = 0; + virtual void incNumOfLemmas() = 0; + + virtual const List *getPiecewiseLinearConstraints() const = 0; }; #endif // __IEngine_h__ diff --git a/src/engine/LeakyReluConstraint.cpp b/src/engine/LeakyReluConstraint.cpp index 28aec9033c..388caf52b2 100644 --- a/src/engine/LeakyReluConstraint.cpp +++ b/src/engine/LeakyReluConstraint.cpp @@ -15,11 +15,9 @@ #include "LeakyReluConstraint.h" #include "Debug.h" -#include "DivideStrategy.h" #include "FloatUtils.h" #include "GlobalConfiguration.h" #include "ITableau.h" -#include "InfeasibleQueryException.h" #include "MStringf.h" #include "MarabouError.h" #include "PiecewiseLinearCaseSplit.h" @@ -193,8 +191,14 @@ void LeakyReluConstraint::notifyLowerBound( unsigned variable, double bound ) { // If we're in the active phase, activeAux should be 0 if ( proofs ) - _boundManager->addLemmaExplanationAndTightenBound( - _activeAux, 0, Tightening::UB, { variable }, Tightening::LB, getType() ); + _boundManager->addLemmaExplanationAndTightenBound( _activeAux, + 0, + Tightening::UB, + { variable }, + Tightening::LB, + *this, + true, + 0 ); else if ( !proofs && _auxVarsInUse ) 
_boundManager->tightenUpperBound( _activeAux, 0 ); @@ -209,8 +213,14 @@ void LeakyReluConstraint::notifyLowerBound( unsigned variable, double bound ) else if ( variable == _f && FloatUtils::isNegative( bound ) ) { if ( proofs ) - _boundManager->addLemmaExplanationAndTightenBound( - _b, bound / _slope, Tightening::LB, { _f }, Tightening::LB, getType() ); + _boundManager->addLemmaExplanationAndTightenBound( _b, + bound / _slope, + Tightening::LB, + { _f }, + Tightening::LB, + *this, + false, + bound ); else _boundManager->tightenLowerBound( _b, bound / _slope, *_inactiveTighteningRow ); } @@ -221,8 +231,14 @@ void LeakyReluConstraint::notifyLowerBound( unsigned variable, double bound ) { // Inactive phase if ( proofs ) - _boundManager->addLemmaExplanationAndTightenBound( - _inactiveAux, 0, Tightening::UB, { _activeAux }, Tightening::LB, getType() ); + _boundManager->addLemmaExplanationAndTightenBound( _inactiveAux, + 0, + Tightening::UB, + { _activeAux }, + Tightening::LB, + *this, + true, + 0 ); else _boundManager->tightenUpperBound( _inactiveAux, 0 ); } @@ -232,8 +248,14 @@ void LeakyReluConstraint::notifyLowerBound( unsigned variable, double bound ) { // Active phase if ( proofs ) - _boundManager->addLemmaExplanationAndTightenBound( - _activeAux, 0, Tightening::UB, { _inactiveAux }, Tightening::LB, getType() ); + _boundManager->addLemmaExplanationAndTightenBound( _activeAux, + 0, + Tightening::UB, + { _inactiveAux }, + Tightening::LB, + *this, + true, + 0 ); else _boundManager->tightenUpperBound( _activeAux, 0 ); } @@ -270,8 +292,14 @@ void LeakyReluConstraint::notifyUpperBound( unsigned variable, double bound ) { unsigned partner = ( variable == _f ) ? 
_b : _f; if ( proofs ) - _boundManager->addLemmaExplanationAndTightenBound( - partner, bound, Tightening::UB, { variable }, Tightening::UB, getType() ); + _boundManager->addLemmaExplanationAndTightenBound( partner, + bound, + Tightening::UB, + { variable }, + Tightening::UB, + *this, + false, + bound ); else _boundManager->tightenUpperBound( partner, bound ); } @@ -280,7 +308,7 @@ void LeakyReluConstraint::notifyUpperBound( unsigned variable, double bound ) // A negative upper bound of b implies inactive phase if ( proofs && _auxVarsInUse ) _boundManager->addLemmaExplanationAndTightenBound( - _inactiveAux, 0, Tightening::UB, { _b }, Tightening::UB, getType() ); + _inactiveAux, 0, Tightening::UB, { _b }, Tightening::UB, *this, true, 0 ); _boundManager->tightenUpperBound( _f, _slope * bound, *_inactiveTighteningRow ); } @@ -289,7 +317,7 @@ void LeakyReluConstraint::notifyUpperBound( unsigned variable, double bound ) // A negative upper bound of f implies inactive phase as well if ( proofs && _auxVarsInUse ) _boundManager->addLemmaExplanationAndTightenBound( - _inactiveAux, 0, Tightening::UB, { _f }, Tightening::UB, getType() ); + _inactiveAux, 0, Tightening::UB, { _f }, Tightening::UB, *this, true, 0 ); _boundManager->tightenUpperBound( _b, bound / _slope, *_inactiveTighteningRow ); } @@ -403,7 +431,7 @@ List LeakyReluConstraint::getSmartFixes( ITablea List LeakyReluConstraint::getCaseSplits() const { - if ( _phaseStatus != PHASE_NOT_FIXED ) + if ( getPhaseStatus() != PHASE_NOT_FIXED ) throw MarabouError( MarabouError::REQUESTED_CASE_SPLITS_FROM_FIXED_CONSTRAINT ); List splits; @@ -525,14 +553,14 @@ PiecewiseLinearCaseSplit LeakyReluConstraint::getActiveSplit() const bool LeakyReluConstraint::phaseFixed() const { - return _phaseStatus != PHASE_NOT_FIXED; + return getPhaseStatus() != PHASE_NOT_FIXED; } PiecewiseLinearCaseSplit LeakyReluConstraint::getImpliedCaseSplit() const { - ASSERT( _phaseStatus != PHASE_NOT_FIXED ); + ASSERT( getPhaseStatus() != PHASE_NOT_FIXED 
); - if ( _phaseStatus == RELU_PHASE_ACTIVE ) + if ( getPhaseStatus() == RELU_PHASE_ACTIVE ) return getActiveSplit(); return getInactiveSplit(); @@ -551,8 +579,8 @@ void LeakyReluConstraint::dump( String &output ) const _b, _slope, _constraintActive ? "Yes" : "No", - _phaseStatus, - phaseToString( _phaseStatus ).ascii() ); + getPhaseStatus(), + phaseToString( getPhaseStatus() ).ascii() ); output += Stringf( "b in [%s, %s], ", @@ -625,18 +653,18 @@ void LeakyReluConstraint::eliminateVariable( __attribute__( ( unused ) ) unsigne { if ( FloatUtils::gt( fixedValue, 0 ) ) { - ASSERT( _phaseStatus != RELU_PHASE_INACTIVE ); + ASSERT( getPhaseStatus() != RELU_PHASE_INACTIVE ); } else if ( FloatUtils::lt( fixedValue, 0 ) ) { - ASSERT( _phaseStatus != RELU_PHASE_ACTIVE ); + ASSERT( getPhaseStatus() != RELU_PHASE_ACTIVE ); } } else if ( variable == _activeAux ) { if ( FloatUtils::isPositive( fixedValue ) ) { - ASSERT( _phaseStatus != RELU_PHASE_ACTIVE ); + ASSERT( getPhaseStatus() != RELU_PHASE_ACTIVE ); } } else @@ -644,7 +672,7 @@ void LeakyReluConstraint::eliminateVariable( __attribute__( ( unused ) ) unsigne // This is the inactive aux variable if ( FloatUtils::isPositive( fixedValue ) ) { - ASSERT( _phaseStatus != RELU_PHASE_INACTIVE ); + ASSERT( getPhaseStatus() != RELU_PHASE_INACTIVE ); } } } ); diff --git a/src/engine/MaxConstraint.cpp b/src/engine/MaxConstraint.cpp index 13571c44a5..71d67d37a4 100644 --- a/src/engine/MaxConstraint.cpp +++ b/src/engine/MaxConstraint.cpp @@ -186,8 +186,8 @@ void MaxConstraint::notifyLowerBound( unsigned variable, double value ) } if ( phaseFixed() ) - _phaseStatus = ( _haveFeasibleEliminatedPhases ? MAX_PHASE_ELIMINATED - : variableToPhase( *_elements.begin() ) ); + setPhaseStatus( ( _haveFeasibleEliminatedPhases ? 
MAX_PHASE_ELIMINATED + : variableToPhase( *_elements.begin() ) ) ); if ( isActive() && _boundManager ) { @@ -247,8 +247,8 @@ void MaxConstraint::notifyUpperBound( unsigned variable, double value ) } if ( phaseFixed() ) - _phaseStatus = ( _haveFeasibleEliminatedPhases ? MAX_PHASE_ELIMINATED - : variableToPhase( *_elements.begin() ) ); + setPhaseStatus( ( _haveFeasibleEliminatedPhases ? MAX_PHASE_ELIMINATED + : variableToPhase( *_elements.begin() ) ) ); // There is no need to recompute the max lower bound and max index here. @@ -481,8 +481,8 @@ void MaxConstraint::updateVariableIndex( unsigned oldIndex, unsigned newIndex ) _elementToAux[newIndex] = auxVar; _auxToElement[auxVar] = newIndex; - if ( _phaseStatus == variableToPhase( oldIndex ) ) - _phaseStatus = variableToPhase( newIndex ); + if ( getPhaseStatus() == variableToPhase( oldIndex ) ) + setPhaseStatus( variableToPhase( newIndex ) ); } else { @@ -533,8 +533,8 @@ void MaxConstraint::eliminateVariable( unsigned var, double value ) } if ( phaseFixed() ) - _phaseStatus = ( _haveFeasibleEliminatedPhases ? MAX_PHASE_ELIMINATED - : variableToPhase( *_elements.begin() ) ); + setPhaseStatus( _haveFeasibleEliminatedPhases ? 
MAX_PHASE_ELIMINATED + : variableToPhase( *_elements.begin() ) ); if ( _elements.size() == 0 ) _obsolete = true; @@ -739,7 +739,7 @@ void MaxConstraint::addTableauAuxVar( unsigned tableauAuxVar, unsigned constrain _elementToTighteningRow[element] = nullptr; } -void MaxConstraint::applyTightenings( const List &tightenings ) const +void MaxConstraint::applyTightenings( const List &tightenings ) { bool proofs = _boundManager && _boundManager->shouldProduceProofs(); @@ -781,7 +781,9 @@ void MaxConstraint::applyTightenings( const List &tightenings ) cons Tightening::UB, getElements(), Tightening::UB, - getType() ); + *this, + false, + tightening._value ); else { ASSERT( _elements.exists( tightening._variable ) ); diff --git a/src/engine/MaxConstraint.h b/src/engine/MaxConstraint.h index 74445e5cf4..0b4f324dc6 100644 --- a/src/engine/MaxConstraint.h +++ b/src/engine/MaxConstraint.h @@ -301,7 +301,7 @@ class MaxConstraint : public PiecewiseLinearConstraint /* Apply tightenings in the list, discovered by getEntailedTightenings */ - void applyTightenings( const List &tightenings ) const; + void applyTightenings( const List &tightenings ); }; #endif // __MaxConstraint_h__ diff --git a/src/engine/PiecewiseLinearConstraint.cpp b/src/engine/PiecewiseLinearConstraint.cpp index 400d9ba01c..a76bc4ea5e 100644 --- a/src/engine/PiecewiseLinearConstraint.cpp +++ b/src/engine/PiecewiseLinearConstraint.cpp @@ -47,6 +47,7 @@ PiecewiseLinearConstraint::PiecewiseLinearConstraint( unsigned numCases ) , _score( FloatUtils::negativeInfinity() ) , _statistics( NULL ) , _gurobi( NULL ) + , _tableauAuxVars() { } diff --git a/src/engine/PiecewiseLinearConstraint.h b/src/engine/PiecewiseLinearConstraint.h index 4304baa019..544ebad167 100644 --- a/src/engine/PiecewiseLinearConstraint.h +++ b/src/engine/PiecewiseLinearConstraint.h @@ -204,7 +204,7 @@ class PiecewiseLinearConstraint : public ITableau::VariableWatcher /* If the constraint's phase has been fixed, get the (valid) case split. 
Transitioning from Valid to Implied with integration of - context-dependentSMTCore. + context-dependent SearchTreeHandler. */ virtual PiecewiseLinearCaseSplit getValidCaseSplit() const = 0; virtual PiecewiseLinearCaseSplit getImpliedCaseSplit() const = 0; @@ -363,6 +363,7 @@ class PiecewiseLinearConstraint : public ITableau::VariableWatcher { _tableau = tableau; } + /* Method to set PhaseStatus of the constraint. Encapsulates both context dependent and context-less behavior. Initialized to PHASE_NOT_FIXED. @@ -509,7 +510,8 @@ class PiecewiseLinearConstraint : public ITableau::VariableWatcher Map _upperBounds; IBoundManager *_boundManager; // Pointer to a centralized object to store bounds. - ITableau *_tableau; // Pointer to tableau which simulates CBT until we switch to CDSmtCore + ITableau *_tableau; // Pointer to tableau which simulates CBT until we switch to + // CDSearchTreeHandler CVC4::context::Context *_context; CVC4::context::CDO *_cdConstraintActive; diff --git a/src/engine/PrecisionRestorer.cpp b/src/engine/PrecisionRestorer.cpp index 6d75420e8d..ce63b7b993 100644 --- a/src/engine/PrecisionRestorer.cpp +++ b/src/engine/PrecisionRestorer.cpp @@ -19,7 +19,7 @@ #include "FloatUtils.h" #include "MalformedBasisException.h" #include "MarabouError.h" -#include "SmtCore.h" +#include "SearchTreeHandler.h" #include "TableauStateStorageLevel.h" #include "UnsatCertificateNode.h" @@ -35,9 +35,16 @@ void PrecisionRestorer::restoreInitialEngineState( IEngine &engine ) void PrecisionRestorer::restorePrecision( IEngine &engine, ITableau &tableau, - SmtCore &smtCore, + SearchTreeHandler &searchTreeHandler, RestoreBasics restoreBasics ) { + Map> plcStatusBefore; + DEBUG( { + for ( const auto *plc : *engine.getPiecewiseLinearConstraints() ) + plcStatusBefore.insert( + plc, Pair( plc->getPhaseStatus(), plc->isActive() ) ); + } ); + // Store the dimensions, bounds and basic variables in the current tableau, // before restoring it unsigned targetM = tableau.getM(); @@ -77,7 
+84,7 @@ void PrecisionRestorer::restorePrecision( IEngine &engine, // Store the case splits performed so far List targetSplits; - smtCore.allSplitsSoFar( targetSplits ); + searchTreeHandler.allSplitsSoFar( targetSplits ); // Restore engine and tableau to their original form engine.restoreState( _initialEngineState ); @@ -87,7 +94,7 @@ void PrecisionRestorer::restorePrecision( IEngine &engine, // At this point, the tableau has the appropriate dimensions. Restore the // variable bounds and basic variables. Note that if column merging is // enabled, the dimensions may not be precisely those before the - // resotration, because merging sometimes fails - in which case an equation + // restoration, because merging sometimes fails - in which case an equation // is added. If we fail to restore the dimensions, we cannot restore the // basics. @@ -163,14 +170,10 @@ void PrecisionRestorer::restorePrecision( IEngine &engine, ASSERT( GlobalConfiguration::USE_COLUMN_MERGING_EQUATIONS || tableau.getN() == targetN ); ASSERT( GlobalConfiguration::USE_COLUMN_MERGING_EQUATIONS || tableau.getM() == targetM ); - // Constraints should be in the same state before and after restoration - for ( const auto &pair : targetEngineState._plConstraintToState ) + for ( const auto *plc : *engine.getPiecewiseLinearConstraints() ) { - ASSERT( pair.second->isActive() == pair.first->isActive() ); - // Only active constraints need to be synchronized - ASSERT( !pair.second->isActive() || - pair.second->phaseFixed() == pair.first->phaseFixed() ); - ASSERT( pair.second->constraintObsolete() == pair.first->constraintObsolete() ); + ASSERT( plc->getPhaseStatus() == plcStatusBefore.get( plc ).first() ); + ASSERT( plc->isActive() == plcStatusBefore.get( plc ).second() ); } EngineState currentEngineState; diff --git a/src/engine/PrecisionRestorer.h b/src/engine/PrecisionRestorer.h index 3fe0c6683b..3f058e1231 100644 --- a/src/engine/PrecisionRestorer.h +++ b/src/engine/PrecisionRestorer.h @@ -18,7 +18,7 @@ 
#include "EngineState.h" -class SmtCore; +class SearchTreeHandler; class PrecisionRestorer { @@ -33,7 +33,7 @@ class PrecisionRestorer void restorePrecision( IEngine &engine, ITableau &tableau, - SmtCore &smtCore, + SearchTreeHandler &searchTreeHandler, RestoreBasics restoreBasics ); private: diff --git a/src/engine/ReluConstraint.cpp b/src/engine/ReluConstraint.cpp index d48be10827..139e8563e4 100644 --- a/src/engine/ReluConstraint.cpp +++ b/src/engine/ReluConstraint.cpp @@ -173,7 +173,7 @@ void ReluConstraint::notifyLowerBound( unsigned variable, double newBound ) // If we're in the active phase, aux should be 0 if ( proofs && _auxVarInUse ) _boundManager->addLemmaExplanationAndTightenBound( - _aux, 0, Tightening::UB, { variable }, Tightening::LB, getType() ); + _aux, 0, Tightening::UB, { variable }, Tightening::LB, *this, true, 0 ); else if ( !proofs && _auxVarInUse ) _boundManager->tightenUpperBound( _aux, 0 ); @@ -187,7 +187,7 @@ void ReluConstraint::notifyLowerBound( unsigned variable, double newBound ) { if ( proofs && _auxVarInUse ) _boundManager->addLemmaExplanationAndTightenBound( - _aux, 0, Tightening::UB, { variable }, Tightening::LB, getType() ); + _aux, 0, Tightening::UB, { variable }, Tightening::LB, *this, true, 0 ); else if ( !proofs && _auxVarInUse ) _boundManager->tightenUpperBound( _aux, 0 ); } @@ -198,7 +198,7 @@ void ReluConstraint::notifyLowerBound( unsigned variable, double newBound ) { if ( proofs ) _boundManager->addLemmaExplanationAndTightenBound( - _f, 0, Tightening::UB, { variable }, Tightening::LB, getType() ); + _f, 0, Tightening::UB, { variable }, Tightening::LB, *this, true, 0 ); else _boundManager->tightenUpperBound( _f, 0 ); @@ -212,11 +212,17 @@ void ReluConstraint::notifyLowerBound( unsigned variable, double newBound ) if ( proofs ) { // If already inactive, tightening is linear - if ( _phaseStatus == RELU_PHASE_INACTIVE ) + if ( getPhaseStatus() == RELU_PHASE_INACTIVE ) _boundManager->tightenUpperBound( _aux, -bound, 
*_tighteningRow ); - else if ( _phaseStatus == PHASE_NOT_FIXED ) - _boundManager->addLemmaExplanationAndTightenBound( - _aux, -bound, Tightening::UB, { variable }, Tightening::LB, getType() ); + else if ( getPhaseStatus() == PHASE_NOT_FIXED ) + _boundManager->addLemmaExplanationAndTightenBound( _aux, + -bound, + Tightening::UB, + { variable }, + Tightening::LB, + *this, + false, + bound ); } else _boundManager->tightenUpperBound( _aux, -bound ); @@ -228,7 +234,7 @@ void ReluConstraint::notifyLowerBound( unsigned variable, double newBound ) { if ( proofs ) _boundManager->addLemmaExplanationAndTightenBound( - _f, 0, Tightening::LB, { variable }, Tightening::LB, getType() ); + _f, 0, Tightening::LB, { variable }, Tightening::LB, *this, false, 0 ); else _boundManager->tightenLowerBound( _f, 0 ); } @@ -267,13 +273,19 @@ void ReluConstraint::notifyUpperBound( unsigned variable, double newBound ) { if ( proofs ) { - if ( _phaseStatus != RELU_PHASE_INACTIVE ) + if ( getPhaseStatus() != RELU_PHASE_INACTIVE ) _boundManager->tightenUpperBound( _b, bound, *_tighteningRow ); else { if ( !FloatUtils::isPositive( bound ) ) - _boundManager->addLemmaExplanationAndTightenBound( - _b, 0, Tightening::UB, { variable }, Tightening::UB, getType() ); + _boundManager->addLemmaExplanationAndTightenBound( _b, + 0, + Tightening::UB, + { variable }, + Tightening::UB, + *this, + true, + 0 ); // Bound cannot be negative if ReLU is inactive if ( FloatUtils::isNegative( bound ) ) throw InfeasibleQueryException(); @@ -289,7 +301,7 @@ void ReluConstraint::notifyUpperBound( unsigned variable, double newBound ) // If b has a non-positive upper bound, f's upper bound is 0 if ( proofs ) _boundManager->addLemmaExplanationAndTightenBound( - _f, 0, Tightening::UB, { variable }, Tightening::UB, getType() ); + _f, 0, Tightening::UB, { variable }, Tightening::UB, *this, true, 0 ); else _boundManager->tightenUpperBound( _f, 0 ); @@ -304,15 +316,17 @@ void ReluConstraint::notifyUpperBound( unsigned variable, 
double newBound ) if ( proofs ) { // If already inactive, tightening is linear - if ( _phaseStatus == RELU_PHASE_ACTIVE ) + if ( getPhaseStatus() == RELU_PHASE_ACTIVE ) _boundManager->tightenUpperBound( _f, bound, *_tighteningRow ); - else if ( _phaseStatus == PHASE_NOT_FIXED ) + else if ( getPhaseStatus() == PHASE_NOT_FIXED ) _boundManager->addLemmaExplanationAndTightenBound( _f, bound, Tightening::UB, { variable }, Tightening::UB, - getType() ); + *this, + false, + bound ); } else _boundManager->tightenUpperBound( _f, bound ); @@ -322,13 +336,19 @@ void ReluConstraint::notifyUpperBound( unsigned variable, double newBound ) { if ( proofs ) { - if ( _phaseStatus != RELU_PHASE_ACTIVE ) + if ( getPhaseStatus() != RELU_PHASE_ACTIVE ) _boundManager->tightenLowerBound( _b, -bound, *_tighteningRow ); else { if ( !FloatUtils::isPositive( bound ) ) - _boundManager->addLemmaExplanationAndTightenBound( - _b, 0, Tightening::LB, { variable }, Tightening::UB, getType() ); + _boundManager->addLemmaExplanationAndTightenBound( _b, + 0, + Tightening::LB, + { variable }, + Tightening::UB, + *this, + true, + 0 ); // Bound cannot be negative if ReLU is active if ( FloatUtils::isNegative( bound ) ) throw InfeasibleQueryException(); @@ -554,7 +574,7 @@ List ReluConstraint::getSmartFixes( ITableau *ta List ReluConstraint::getCaseSplits() const { - if ( _phaseStatus != PHASE_NOT_FIXED ) + if ( getPhaseStatus() != PHASE_NOT_FIXED ) throw MarabouError( MarabouError::REQUESTED_CASE_SPLITS_FROM_FIXED_CONSTRAINT ); List splits; @@ -664,14 +684,14 @@ PiecewiseLinearCaseSplit ReluConstraint::getActiveSplit() const bool ReluConstraint::phaseFixed() const { - return _phaseStatus != PHASE_NOT_FIXED; + return getPhaseStatus() != PHASE_NOT_FIXED; } PiecewiseLinearCaseSplit ReluConstraint::getImpliedCaseSplit() const { - ASSERT( _phaseStatus != PHASE_NOT_FIXED ); + ASSERT( getPhaseStatus() != PHASE_NOT_FIXED ); - if ( _phaseStatus == RELU_PHASE_ACTIVE ) + if ( getPhaseStatus() == RELU_PHASE_ACTIVE ) 
return getActiveSplit(); return getInactiveSplit(); @@ -688,8 +708,8 @@ void ReluConstraint::dump( String &output ) const _f, _b, _constraintActive ? "Yes" : "No", - _phaseStatus, - phaseToString( _phaseStatus ).ascii() ); + getPhaseStatus(), + phaseToString( getPhaseStatus() ).ascii() ); output += Stringf( "b in [%s, %s], ", @@ -756,11 +776,11 @@ void ReluConstraint::eliminateVariable( __attribute__( ( unused ) ) unsigned var { if ( FloatUtils::gt( fixedValue, 0 ) ) { - ASSERT( _phaseStatus != RELU_PHASE_INACTIVE ); + ASSERT( getPhaseStatus() != RELU_PHASE_INACTIVE ); } else if ( FloatUtils::lt( fixedValue, 0 ) ) { - ASSERT( _phaseStatus != RELU_PHASE_ACTIVE ); + ASSERT( getPhaseStatus() != RELU_PHASE_ACTIVE ); } } else @@ -768,7 +788,7 @@ void ReluConstraint::eliminateVariable( __attribute__( ( unused ) ) unsigned var // This is the aux variable if ( FloatUtils::isPositive( fixedValue ) ) { - ASSERT( _phaseStatus != RELU_PHASE_ACTIVE ); + ASSERT( getPhaseStatus() != RELU_PHASE_ACTIVE ); } } } ); diff --git a/src/engine/SmtCore.cpp b/src/engine/SearchTreeHandler.cpp similarity index 86% rename from src/engine/SmtCore.cpp rename to src/engine/SearchTreeHandler.cpp index bdf75aadaa..9fc319d5a7 100644 --- a/src/engine/SmtCore.cpp +++ b/src/engine/SearchTreeHandler.cpp @@ -1,5 +1,5 @@ /********************* */ -/*! \file SmtCore.cpp +/*! 
\file SearchTreeHandler.cpp ** \verbatim ** Top contributors (to current version): ** Guy Katz, Parth Shah, Duligur Ibeling @@ -11,12 +11,11 @@ ** ** [[ Add lengthier description here ]] - **/ +**/ -#include "SmtCore.h" +#include "SearchTreeHandler.h" #include "Debug.h" -#include "DivideStrategy.h" #include "EngineState.h" #include "FloatUtils.h" #include "GlobalConfiguration.h" @@ -25,10 +24,9 @@ #include "MarabouError.h" #include "Options.h" #include "PseudoImpactTracker.h" -#include "ReluConstraint.h" #include "UnsatCertificateNode.h" -SmtCore::SmtCore( IEngine *engine ) +SearchTreeHandler::SearchTreeHandler( IEngine *engine ) : _statistics( NULL ) , _engine( engine ) , _context( _engine->getContext() ) @@ -44,12 +42,12 @@ SmtCore::SmtCore( IEngine *engine ) { } -SmtCore::~SmtCore() +SearchTreeHandler::~SearchTreeHandler() { freeMemory(); } -void SmtCore::freeMemory() +void SearchTreeHandler::freeMemory() { for ( const auto &stackEntry : _stack ) { @@ -60,7 +58,7 @@ void SmtCore::freeMemory() _stack.clear(); } -void SmtCore::reset() +void SearchTreeHandler::reset() { _context.popto( 0 ); _engine->postContextPopHook(); @@ -73,7 +71,7 @@ void SmtCore::reset() _numRejectedPhasePatternProposal = 0; } -void SmtCore::reportViolatedConstraint( PiecewiseLinearConstraint *constraint ) +void SearchTreeHandler::reportViolatedConstraint( PiecewiseLinearConstraint *constraint ) { if ( !_constraintToViolationCount.exists( constraint ) ) _constraintToViolationCount[constraint] = 0; @@ -90,7 +88,7 @@ void SmtCore::reportViolatedConstraint( PiecewiseLinearConstraint *constraint ) } } -unsigned SmtCore::getViolationCounts( PiecewiseLinearConstraint *constraint ) const +unsigned SearchTreeHandler::getViolationCounts( PiecewiseLinearConstraint *constraint ) const { if ( !_constraintToViolationCount.exists( constraint ) ) return 0; @@ -98,7 +96,7 @@ unsigned SmtCore::getViolationCounts( PiecewiseLinearConstraint *constraint ) co return _constraintToViolationCount[constraint]; } 
-void SmtCore::initializeScoreTrackerIfNeeded( +void SearchTreeHandler::initializeScoreTrackerIfNeeded( const List &plConstraints ) { if ( GlobalConfiguration::USE_DEEPSOI_LOCAL_SEARCH ) @@ -106,11 +104,11 @@ void SmtCore::initializeScoreTrackerIfNeeded( _scoreTracker = std::unique_ptr( new PseudoImpactTracker() ); _scoreTracker->initialize( plConstraints ); - SMT_LOG( "\tTracking Pseudo Impact..." ); + SEARCH_TREE_LOG( "\tTracking Pseudo Impact..." ); } } -void SmtCore::reportRejectedPhasePatternProposal() +void SearchTreeHandler::reportRejectedPhasePatternProposal() { ++_numRejectedPhasePatternProposal; @@ -126,12 +124,12 @@ void SmtCore::reportRejectedPhasePatternProposal() } } -bool SmtCore::needToSplit() const +bool SearchTreeHandler::needToSplit() const { return _needToSplit; } -void SmtCore::performSplit() +void SearchTreeHandler::performSplit() { ASSERT( _needToSplit ); @@ -182,7 +180,7 @@ void SmtCore::performSplit() new UnsatCertificateNode( certificateNode, childSplit ); } - SmtStackEntry *stackEntry = new SmtStackEntry; + SearchTreeStackEntry *stackEntry = new SearchTreeStackEntry; // Perform the first split: add bounds and equations List::iterator split = splits.begin(); ASSERT( split->getEquations().size() == 0 ); @@ -218,21 +216,21 @@ void SmtCore::performSplit() if ( level > _statistics->getUnsignedAttribute( Statistics::MAX_DECISION_LEVEL ) ) _statistics->setUnsignedAttribute( Statistics::MAX_DECISION_LEVEL, level ); struct timespec end = TimeUtils::sampleMicro(); - _statistics->incLongAttribute( Statistics::TOTAL_TIME_SMT_CORE_MICRO, + _statistics->incLongAttribute( Statistics::TOTAL_TIME_SEARCH_TREE_HANDLER_MICRO, TimeUtils::timePassed( start, end ) ); } _constraintForSplitting = NULL; } -unsigned SmtCore::getStackDepth() const +unsigned SearchTreeHandler::getStackDepth() const { ASSERT( ( _engine->inSnCMode() || _stack.size() == static_cast( _context.getLevel() ) ) ); return _stack.size(); } -void SmtCore::popContext() +void 
SearchTreeHandler::popContext() { struct timespec start = TimeUtils::sampleMicro(); _context.pop(); @@ -246,7 +244,7 @@ void SmtCore::popContext() } } -void SmtCore::pushContext() +void SearchTreeHandler::pushContext() { struct timespec start = TimeUtils::sampleMicro(); _context.push(); @@ -260,9 +258,9 @@ void SmtCore::pushContext() } } -bool SmtCore::popSplit() +bool SearchTreeHandler::popSplit() { - SMT_LOG( "Performing a pop" ); + SEARCH_TREE_LOG( "Performing a pop" ); if ( _stack.empty() ) return false; @@ -300,6 +298,7 @@ bool SmtCore::popSplit() { UnsatCertificateNode *certificateNode = _engine->getUNSATCertificateCurrentPointer(); + certificateNode->deleteUnusedLemmas(); _engine->setUNSATCertificateCurrentPointer( certificateNode->getParent() ); } @@ -314,14 +313,17 @@ bool SmtCore::popSplit() throw MarabouError( MarabouError::DEBUGGING_ERROR ); } - SmtStackEntry *stackEntry = _stack.back(); + SearchTreeStackEntry *stackEntry = _stack.back(); + + if ( _engine->shouldProduceProofs() && _engine->getUNSATCertificateCurrentPointer() ) + _engine->getUNSATCertificateCurrentPointer()->deleteUnusedLemmas(); popContext(); _engine->postContextPopHook(); // Restore the state of the engine - SMT_LOG( "\tRestoring engine state..." ); + SEARCH_TREE_LOG( "\tRestoring engine state..." 
); _engine->restoreState( *( stackEntry->_engineState ) ); - SMT_LOG( "\tRestoring engine state - DONE" ); + SEARCH_TREE_LOG( "\tRestoring engine state - DONE" ); // Apply the new split and erase it from the list auto split = stackEntry->_alternativeSplits.begin(); @@ -339,6 +341,7 @@ bool SmtCore::popSplit() UnsatCertificateNode *splitChild = certificateNode->getChildBySplit( *split ); while ( !splitChild ) { + certificateNode->deleteUnusedLemmas(); certificateNode = certificateNode->getParent(); ASSERT( certificateNode ); splitChild = certificateNode->getChildBySplit( *split ); @@ -348,12 +351,12 @@ bool SmtCore::popSplit() ASSERT( _engine->getUNSATCertificateCurrentPointer()->getSplit() == *split ); } - SMT_LOG( "\tApplying new split..." ); + SEARCH_TREE_LOG( "\tApplying new split..." ); ASSERT( split->getEquations().size() == 0 ); _engine->preContextPushHook(); pushContext(); _engine->applySplit( *split ); - SMT_LOG( "\tApplying new split - DONE" ); + SEARCH_TREE_LOG( "\tApplying new split - DONE" ); stackEntry->_activeSplit = *split; stackEntry->_alternativeSplits.erase( split ); @@ -371,7 +374,7 @@ bool SmtCore::popSplit() if ( level > _statistics->getUnsignedAttribute( Statistics::MAX_DECISION_LEVEL ) ) _statistics->setUnsignedAttribute( Statistics::MAX_DECISION_LEVEL, level ); struct timespec end = TimeUtils::sampleMicro(); - _statistics->incLongAttribute( Statistics::TOTAL_TIME_SMT_CORE_MICRO, + _statistics->incLongAttribute( Statistics::TOTAL_TIME_SEARCH_TREE_HANDLER_MICRO, TimeUtils::timePassed( start, end ) ); } @@ -380,14 +383,14 @@ bool SmtCore::popSplit() return true; } -void SmtCore::resetSplitConditions() +void SearchTreeHandler::resetSplitConditions() { _constraintToViolationCount.clear(); _numRejectedPhasePatternProposal = 0; _needToSplit = false; } -void SmtCore::recordImpliedValidSplit( PiecewiseLinearCaseSplit &validSplit ) +void SearchTreeHandler::recordImpliedValidSplit( PiecewiseLinearCaseSplit &validSplit ) { if ( _stack.empty() ) 
_impliedValidSplitsAtRoot.append( validSplit ); @@ -397,7 +400,7 @@ void SmtCore::recordImpliedValidSplit( PiecewiseLinearCaseSplit &validSplit ) checkSkewFromDebuggingSolution(); } -void SmtCore::allSplitsSoFar( List &result ) const +void SearchTreeHandler::allSplitsSoFar( List &result ) const { result.clear(); @@ -412,19 +415,19 @@ void SmtCore::allSplitsSoFar( List &result ) const } } -void SmtCore::setStatistics( Statistics *statistics ) +void SearchTreeHandler::setStatistics( Statistics *statistics ) { _statistics = statistics; } -void SmtCore::storeDebuggingSolution( const Map &debuggingSolution ) +void SearchTreeHandler::storeDebuggingSolution( const Map &debuggingSolution ) { _debuggingSolution = debuggingSolution; } // Return true if stack is currently compliant, false otherwise // If there is no stored solution, return false --- incompliant. -bool SmtCore::checkSkewFromDebuggingSolution() +bool SearchTreeHandler::checkSkewFromDebuggingSolution() { if ( _debuggingSolution.empty() ) return false; @@ -476,8 +479,8 @@ bool SmtCore::checkSkewFromDebuggingSolution() return true; } -bool SmtCore::splitAllowsStoredSolution( const PiecewiseLinearCaseSplit &split, - String &error ) const +bool SearchTreeHandler::splitAllowsStoredSolution( const PiecewiseLinearCaseSplit &split, + String &error ) const { // False if the split prevents one of the values in the stored solution, true otherwise. 
error = ""; @@ -520,7 +523,7 @@ bool SmtCore::splitAllowsStoredSolution( const PiecewiseLinearCaseSplit &split, return true; } -PiecewiseLinearConstraint *SmtCore::chooseViolatedConstraintForFixing( +PiecewiseLinearConstraint *SearchTreeHandler::chooseViolatedConstraintForFixing( List &_violatedPlConstraints ) const { ASSERT( !_violatedPlConstraints.empty() ); @@ -554,7 +557,7 @@ PiecewiseLinearConstraint *SmtCore::chooseViolatedConstraintForFixing( return candidate; } -void SmtCore::replaySmtStackEntry( SmtStackEntry *stackEntry ) +void SearchTreeHandler::replaySearchTreeStackEntry( SearchTreeStackEntry *stackEntry ) { struct timespec start = TimeUtils::sampleMicro(); @@ -585,22 +588,22 @@ void SmtCore::replaySmtStackEntry( SmtStackEntry *stackEntry ) if ( level > _statistics->getUnsignedAttribute( Statistics::MAX_DECISION_LEVEL ) ) _statistics->setUnsignedAttribute( Statistics::MAX_DECISION_LEVEL, level ); struct timespec end = TimeUtils::sampleMicro(); - _statistics->incLongAttribute( Statistics::TOTAL_TIME_SMT_CORE_MICRO, + _statistics->incLongAttribute( Statistics::TOTAL_TIME_SEARCH_TREE_HANDLER_MICRO, TimeUtils::timePassed( start, end ) ); } } -void SmtCore::storeSmtState( SmtState &smtState ) +void SearchTreeHandler::storeSearchTreeState( SearchTreeState &searchTreeState ) { - smtState._impliedValidSplitsAtRoot = _impliedValidSplitsAtRoot; + searchTreeState._impliedValidSplitsAtRoot = _impliedValidSplitsAtRoot; for ( auto &stackEntry : _stack ) - smtState._stack.append( stackEntry->duplicateSmtStackEntry() ); + searchTreeState._stack.append( stackEntry->duplicateSearchTreeStackEntry() ); - smtState._stateId = _stateId; + searchTreeState._stateId = _stateId; } -bool SmtCore::pickSplitPLConstraint() +bool SearchTreeHandler::pickSplitPLConstraint() { if ( _needToSplit ) { diff --git a/src/engine/SmtCore.h b/src/engine/SearchTreeHandler.h similarity index 79% rename from src/engine/SmtCore.h rename to src/engine/SearchTreeHandler.h index ad1d61f8e9..25f7e56a6e 
100644 --- a/src/engine/SmtCore.h +++ b/src/engine/SearchTreeHandler.h @@ -1,5 +1,5 @@ /********************* */ -/*! \file SmtCore.h +/*! \file SearchTreeHandler.h ** \verbatim ** Top contributors (to current version): ** Guy Katz, Parth Shah @@ -13,22 +13,25 @@ **/ -#ifndef __SmtCore_h__ -#define __SmtCore_h__ +#ifndef __SearchTreeHandler_h__ +#define __SearchTreeHandler_h__ #include "DivideStrategy.h" +#include "HashMap.h" #include "PLConstraintScoreTracker.h" #include "PiecewiseLinearCaseSplit.h" #include "PiecewiseLinearConstraint.h" -#include "SmtStackEntry.h" -#include "SmtState.h" +#include "SearchTreeStackEntry.h" +#include "SearchTreeState.h" #include "Stack.h" #include "Statistics.h" +#include "context/cdhashmap.h" #include "context/context.h" #include -#define SMT_LOG( x, ... ) LOG( GlobalConfiguration::SMT_CORE_LOGGING, "SmtCore: %s\n", x ) +#define SEARCH_TREE_LOG( x, ... ) \ + LOG( GlobalConfiguration::SEARCH_TREE_HANDLER_LOGGING, "SearchTreeHandler: %s\n", x ) class EngineState; class IEngine; @@ -36,11 +39,11 @@ class String; using CVC4::context::Context; -class SmtCore +class SearchTreeHandler { public: - SmtCore( IEngine *engine ); - ~SmtCore(); + explicit SearchTreeHandler( IEngine *engine ); + ~SearchTreeHandler(); /* Clear the stack. @@ -48,7 +51,7 @@ class SmtCore void freeMemory(); /* - Reset the SmtCore + Reset the SearchTreeHandler */ void reset(); @@ -58,7 +61,7 @@ class SmtCore void initializeScoreTrackerIfNeeded( const List &plConstraints ); /* - Inform the SMT core that a SoI phase pattern proposal is rejected. + Inform the Search Tree handler that a SoI phase pattern proposal is rejected. */ void reportRejectedPhasePatternProposal(); @@ -80,7 +83,7 @@ class SmtCore } /* - Inform the SMT core that a PL constraint is violated. + Inform the Search Tree handler that a PL constraint is violated. 
*/ void reportViolatedConstraint( PiecewiseLinearConstraint *constraint ); @@ -97,7 +100,7 @@ class SmtCore void resetSplitConditions(); /* - Returns true iff the SMT core wants to perform a case split. + Returns true iff the Search Tree handler wants to perform a case split. */ bool needToSplit() const; @@ -123,30 +126,29 @@ class SmtCore */ void pushContext(); - /* The current stack depth. */ unsigned getStackDepth() const; /* - Let the smt core know of an implied valid case split that was discovered. + Let the search tree handler know of an implied valid case split that was discovered. */ void recordImpliedValidSplit( PiecewiseLinearCaseSplit &validSplit ); /* - Return a list of all splits performed so far, both SMT-originating and valid ones, + Return a list of all splits performed so far, both Search Tree-originating and valid ones, in the correct order. */ void allSplitsSoFar( List &result ) const; /* - Have the SMT core start reporting statistics. + Have the Search Tree handler start reporting statistics. */ void setStatistics( Statistics *statistics ); /* - Have the SMT core choose, among a set of violated PL constraints, which + Have the Search Tree handler choose, among a set of violated PL constraints, which constraint should be repaired (without splitting) */ PiecewiseLinearConstraint *chooseViolatedConstraintForFixing( @@ -160,12 +162,12 @@ class SmtCore /* Replay a stackEntry */ - void replaySmtStackEntry( SmtStackEntry *stackEntry ); + void replaySearchTreeStackEntry( SearchTreeStackEntry *stackEntry ); /* - Store the current state of the SmtCore into smtState + Store the current state of the SearchTreeHandler into searchTreeState */ - void storeSmtState( SmtState &smtState ); + void storeSearchTreeState( SearchTreeState &searchTreeState ); /* Pick the piecewise linear constraint for splitting, returns true @@ -194,7 +196,7 @@ class SmtCore /* The case-split stack. */ - List _stack; + List _stack; /* The engine. 
@@ -205,6 +207,7 @@ class SmtCore Context for synchronizing the search. */ Context &_context; + /* Do we need to perform a split and on which constraint. */ @@ -247,7 +250,7 @@ class SmtCore /* Heap to store the scores of each PLConstraint. */ - std::unique_ptr _scoreTracker; + std::shared_ptr _scoreTracker; /* Number of times the phase pattern proposal has been rejected at the @@ -256,7 +259,7 @@ class SmtCore unsigned _numRejectedPhasePatternProposal; }; -#endif // __SmtCore_h__ +#endif // __SearchTreeHandler_h__ // // Local Variables: diff --git a/src/engine/SmtStackEntry.h b/src/engine/SearchTreeStackEntry.h similarity index 80% rename from src/engine/SmtStackEntry.h rename to src/engine/SearchTreeStackEntry.h index ec9b3f2ce2..a701d9ffd3 100644 --- a/src/engine/SmtStackEntry.h +++ b/src/engine/SearchTreeStackEntry.h @@ -1,5 +1,5 @@ /********************* */ -/*! \file SmtStackEntry.h +/*! \file SearchTreeStackEntry.h ** \verbatim ** Top contributors (to current version): ** Guy Katz, Haoze Wu @@ -13,8 +13,8 @@ **/ -#ifndef __SmtStackEntry_h__ -#define __SmtStackEntry_h__ +#ifndef __SearchTreeStackEntry_h__ +#define __SearchTreeStackEntry_h__ #include "EngineState.h" #include "PiecewiseLinearCaseSplit.h" @@ -24,7 +24,7 @@ the active split, the alternative splits (in case of backtrack), and also any implied splits that were discovered subsequently. */ -struct SmtStackEntry +struct SearchTreeStackEntry { public: PiecewiseLinearCaseSplit _activeSplit; @@ -33,14 +33,14 @@ struct SmtStackEntry EngineState *_engineState; /* - Create a copy of the SmtStackEntry on the stack and returns a pointer to + Create a copy of the SearchTreeStackEntry on the stack and returns a pointer to the copy. We do not copy the engineState for now, since where this method is called, we recreate the engineState by replaying the caseSplits. 
*/ - SmtStackEntry *duplicateSmtStackEntry() + SearchTreeStackEntry *duplicateSearchTreeStackEntry() { - SmtStackEntry *copy = new SmtStackEntry(); + SearchTreeStackEntry *copy = new SearchTreeStackEntry(); copy->_activeSplit = _activeSplit; copy->_impliedValidSplits = _impliedValidSplits; @@ -51,7 +51,7 @@ struct SmtStackEntry } }; -#endif // __SmtStackEntry_h__ +#endif // __SearchTreeStackEntry_h__ // // Local Variables: diff --git a/src/engine/SmtState.h b/src/engine/SearchTreeState.h similarity index 82% rename from src/engine/SmtState.h rename to src/engine/SearchTreeState.h index d513b67b34..23d0353128 100644 --- a/src/engine/SmtState.h +++ b/src/engine/SearchTreeState.h @@ -1,5 +1,5 @@ /********************* */ -/*! \file SmtState.h +/*! \file SearchTreeState.h ** \verbatim ** Top contributors (to current version): ** Guy Katz, Duligur Ibeling @@ -11,17 +11,17 @@ ** ** [[ Add lengthier description here ]] - **/ +**/ -#ifndef __SmtState_h__ -#define __SmtState_h__ +#ifndef __SearchTreeState_h__ +#define __SearchTreeState_h__ #include "List.h" #include "Map.h" #include "PiecewiseLinearConstraint.h" -#include "SmtStackEntry.h" +#include "SearchTreeStackEntry.h" -class SmtState +class SearchTreeState { public: /* @@ -32,7 +32,7 @@ class SmtState /* The stack. 
*/ - List _stack; + List _stack; /* A unique ID allocated to every state that is stored, for @@ -41,7 +41,7 @@ class SmtState unsigned _stateId; }; -#endif // __SmtState_h__ +#endif // __SearchTreeState_h__ // // Local Variables: diff --git a/src/engine/SignConstraint.cpp b/src/engine/SignConstraint.cpp index 744da2da94..fea9546bc2 100644 --- a/src/engine/SignConstraint.cpp +++ b/src/engine/SignConstraint.cpp @@ -23,7 +23,6 @@ #include "MarabouError.h" #include "PiecewiseLinearCaseSplit.h" #include "Query.h" -#include "SignConstraint.h" #include "Statistics.h" #ifdef _WIN32 @@ -123,7 +122,7 @@ bool SignConstraint::satisfied() const List SignConstraint::getCaseSplits() const { - if ( _phaseStatus != PHASE_NOT_FIXED ) + if ( getPhaseStatus() != PHASE_NOT_FIXED ) throw MarabouError( MarabouError::REQUESTED_CASE_SPLITS_FROM_FIXED_CONSTRAINT ); List splits; @@ -168,7 +167,7 @@ List SignConstraint::getCaseSplits() const List SignConstraint::getAllCases() const { - if ( _phaseStatus != PHASE_NOT_FIXED ) + if ( getPhaseStatus() != PHASE_NOT_FIXED ) throw MarabouError( MarabouError::REQUESTED_CASE_SPLITS_FROM_FIXED_CONSTRAINT ); if ( _direction == SIGN_PHASE_NEGATIVE ) @@ -220,7 +219,7 @@ PiecewiseLinearCaseSplit SignConstraint::getPositiveSplit() const bool SignConstraint::phaseFixed() const { - return _phaseStatus != PHASE_NOT_FIXED; + return getPhaseStatus() != PHASE_NOT_FIXED; } void SignConstraint::addAuxiliaryEquationsAfterPreprocessing( Query &inputQuery ) @@ -276,9 +275,9 @@ void SignConstraint::addAuxiliaryEquationsAfterPreprocessing( Query &inputQuery PiecewiseLinearCaseSplit SignConstraint::getImpliedCaseSplit() const { - ASSERT( _phaseStatus != PHASE_NOT_FIXED ); + ASSERT( getPhaseStatus() != PHASE_NOT_FIXED ); - if ( _phaseStatus == PhaseStatus::SIGN_PHASE_POSITIVE ) + if ( getPhaseStatus() == PhaseStatus::SIGN_PHASE_POSITIVE ) return getPositiveSplit(); return getNegativeSplit(); @@ -403,9 +402,9 @@ void SignConstraint::notifyLowerBound( unsigned variable, 
double bound ) throw InfeasibleQueryException(); _boundManager->addLemmaExplanationAndTightenBound( - _f, 1, Tightening::LB, { variable }, Tightening::LB, getType() ); + _f, 1, Tightening::LB, { variable }, Tightening::LB, *this, true, bound ); _boundManager->addLemmaExplanationAndTightenBound( - _b, 0, Tightening::LB, { variable }, Tightening::LB, getType() ); + _b, 0, Tightening::LB, { variable }, Tightening::LB, *this, false, bound ); } else { @@ -421,7 +420,7 @@ void SignConstraint::notifyLowerBound( unsigned variable, double bound ) { if ( _boundManager->shouldProduceProofs() ) _boundManager->addLemmaExplanationAndTightenBound( - _f, 1, Tightening::LB, { variable }, Tightening::LB, getType() ); + _f, 1, Tightening::LB, { variable }, Tightening::LB, *this, true, bound ); else _boundManager->tightenLowerBound( _f, 1 ); } @@ -453,9 +452,9 @@ void SignConstraint::notifyUpperBound( unsigned variable, double bound ) throw InfeasibleQueryException(); _boundManager->addLemmaExplanationAndTightenBound( - _f, -1, Tightening::UB, { variable }, Tightening::UB, getType() ); + _f, -1, Tightening::UB, { variable }, Tightening::UB, *this, true, bound ); _boundManager->addLemmaExplanationAndTightenBound( - _b, 0, Tightening::UB, { variable }, Tightening::UB, getType() ); + _b, 0, Tightening::UB, { variable }, Tightening::UB, *this, false, bound ); } else { @@ -471,7 +470,7 @@ void SignConstraint::notifyUpperBound( unsigned variable, double bound ) { if ( _boundManager->shouldProduceProofs() ) _boundManager->addLemmaExplanationAndTightenBound( - _f, -1, Tightening::UB, { variable }, Tightening::UB, getType() ); + _f, -1, Tightening::UB, { variable }, Tightening::UB, *this, true, bound ); else _boundManager->tightenUpperBound( _f, -1 ); } @@ -572,22 +571,22 @@ void SignConstraint::eliminateVariable( __attribute__( ( unused ) ) unsigned var if ( FloatUtils::areEqual( fixedValue, 1 ) ) { - ASSERT( _phaseStatus != SIGN_PHASE_NEGATIVE ); + ASSERT( getPhaseStatus() != 
SIGN_PHASE_NEGATIVE ); } else if ( FloatUtils::areEqual( fixedValue, -1 ) ) { - ASSERT( _phaseStatus != SIGN_PHASE_POSITIVE ); + ASSERT( getPhaseStatus() != SIGN_PHASE_POSITIVE ); } } else if ( variable == _b ) { if ( FloatUtils::gte( fixedValue, 0 ) ) { - ASSERT( _phaseStatus != SIGN_PHASE_NEGATIVE ); + ASSERT( getPhaseStatus() != SIGN_PHASE_NEGATIVE ); } else if ( FloatUtils::lt( fixedValue, 0 ) ) { - ASSERT( _phaseStatus != SIGN_PHASE_POSITIVE ); + ASSERT( getPhaseStatus() != SIGN_PHASE_POSITIVE ); } } } ); @@ -612,8 +611,8 @@ void SignConstraint::dump( String &output ) const _f, _b, _constraintActive ? "Yes" : "No", - _phaseStatus, - phaseToString( _phaseStatus ).ascii() ); + getPhaseStatus(), + phaseToString( getPhaseStatus() ).ascii() ); output += Stringf( "b in [%s, %s], ", diff --git a/src/engine/SubQuery.h b/src/engine/SubQuery.h index 7f4a12940b..c8239b5869 100644 --- a/src/engine/SubQuery.h +++ b/src/engine/SubQuery.h @@ -19,7 +19,7 @@ #include "List.h" #include "MString.h" #include "PiecewiseLinearCaseSplit.h" -#include "SmtState.h" +#include "SearchTreeState.h" #include #include @@ -33,7 +33,7 @@ struct SubQuery String _queryId; std::unique_ptr _split; - std::unique_ptr _smtState; + std::unique_ptr _searchTreeState; unsigned _timeoutInSeconds; unsigned _depth; }; diff --git a/src/engine/Tableau.cpp b/src/engine/Tableau.cpp index 21fe242875..70ea584545 100644 --- a/src/engine/Tableau.cpp +++ b/src/engine/Tableau.cpp @@ -2538,7 +2538,28 @@ void Tableau::getColumnOfBasis( unsigned column, SparseUnsortedList *result ) co void Tableau::refreshBasisFactorization() { - _basisFactorization->obtainFreshBasis(); + try + { + _basisFactorization->obtainFreshBasis(); + } + catch ( const MalformedBasisException & ) + { + ConstraintMatrixAnalyzer analyzer; + analyzer.analyze( (const SparseUnsortedList **)_sparseRowsOfA, _m, _n ); + List independentColumns = analyzer.getIndependentColumns(); + + try + { + initializeTableau( independentColumns ); + } + catch ( 
MalformedBasisException & ) + { + TABLEAU_LOG( "refreshBasisFactorization failed - could not refactorize basis" ); + throw MarabouError( MarabouError::FAILURE_TO_ADD_NEW_EQUATION ); + } + + computeCostFunction(); + } } unsigned Tableau::getVariableAfterMerging( unsigned variable ) const diff --git a/src/engine/tests/MockBoundManager.h b/src/engine/tests/MockBoundManager.h index 324ccad18e..584ad490c3 100644 --- a/src/engine/tests/MockBoundManager.h +++ b/src/engine/tests/MockBoundManager.h @@ -268,7 +268,9 @@ class MockBoundManager : public IBoundManager Tightening::BoundType /* affectedVarBound */, const List & /* causingVar */, Tightening::BoundType /* causingVarBound */, - PiecewiseLinearFunctionType /* constraintType */ ) + PiecewiseLinearConstraint /* constraintType */ &, + bool /* isPhaseFixing*/, + double /* minTargetBound */ ) { return true; } diff --git a/src/engine/tests/MockEngine.h b/src/engine/tests/MockEngine.h index 4bab581301..93c9daec1a 100644 --- a/src/engine/tests/MockEngine.h +++ b/src/engine/tests/MockEngine.h @@ -22,6 +22,8 @@ #include "PiecewiseLinearConstraint.h" #include "context/context.h" +#include + class String; class MockEngine : public IEngine @@ -93,6 +95,7 @@ class MockEngine : public IEngine } void postContextPopHook(){}; + void preContextPushHook(){}; mutable EngineState *lastStoredState; @@ -154,17 +157,17 @@ class MockEngine : public IEngine { } - mutable SmtState *lastRestoredSmtState; - bool restoreSmtState( SmtState &smtState ) + mutable SearchTreeState *lastRestoredSearchTreeState; + bool restoreSearchTreeState( SearchTreeState &searchTreeState ) { - lastRestoredSmtState = &smtState; + lastRestoredSearchTreeState = &searchTreeState; return true; } - mutable SmtState *lastStoredSmtState; - void storeSmtState( SmtState &smtState ) + mutable SearchTreeState *lastStoredSearchTreeState; + void storeSearchTreeState( SearchTreeState &searchTreeState ) { - lastStoredSmtState = &smtState; + lastStoredSearchTreeState = 
&searchTreeState; } List _constraintsToSplit; @@ -233,13 +236,9 @@ class MockEngine : public IEngine return 0.0; } - void updateGroundUpperBound( unsigned /* var */, double /* value */ ) - { - } + void updateGroundUpperBound( unsigned /* var */, double /* value */ ){}; - void updateGroundLowerBound( unsigned /*var*/, double /*value*/ ) - { - } + void updateGroundLowerBound( unsigned /*var*/, double /*value*/ ){}; double getGroundBound( unsigned /*var*/, bool /*isUpper*/ ) const { @@ -251,9 +250,7 @@ class MockEngine : public IEngine return NULL; } - void setUNSATCertificateCurrentPointer( UnsatCertificateNode * /* node*/ ) - { - } + void setUNSATCertificateCurrentPointer( UnsatCertificateNode * /* node*/ ){}; const UnsatCertificateNode *getUNSATCertificateRoot() const { @@ -265,18 +262,14 @@ class MockEngine : public IEngine return true; } - void explainSimplexFailure() - { - } + void explainSimplexFailure(){}; const BoundExplainer *getBoundExplainer() const { return NULL; } - void setBoundExplainerContent( BoundExplainer * /*boundExplainer */ ) - { - } + void setBoundExplainerContent( BoundExplainer * /*boundExplainer */ ){}; void propagateBoundManagerTightenings() { @@ -287,9 +280,12 @@ class MockEngine : public IEngine return true; } - void addPLCLemma( std::shared_ptr & /*explanation*/ ) + const List *getPiecewiseLinearConstraints() const { + return NULL; } + + void incNumOfLemmas(){}; }; #endif // __MockEngine_h__ diff --git a/src/engine/tests/Test_SmtCore.h b/src/engine/tests/Test_SearchTreeHandler.h similarity index 75% rename from src/engine/tests/Test_SmtCore.h rename to src/engine/tests/Test_SearchTreeHandler.h index eae4dea4c0..a0380aba25 100644 --- a/src/engine/tests/Test_SmtCore.h +++ b/src/engine/tests/Test_SearchTreeHandler.h @@ -1,5 +1,5 @@ /********************* */ -/*! \file Test_SmtCore.h +/*! 
\file Test_SearchTreeHandler.h ** \verbatim ** Top contributors (to current version): ** Guy Katz, Duligur Ibeling, Derek Huang @@ -11,7 +11,7 @@ ** ** [[ Add lengthier description here ]] - **/ +**/ #include "MockEngine.h" #include "MockErrno.h" @@ -19,20 +19,20 @@ #include "PiecewiseLinearConstraint.h" #include "Query.h" #include "ReluConstraint.h" -#include "SmtCore.h" +#include "SearchTreeHandler.h" #include #include -class MockForSmtCore +class MockForSearchTreeHandler { public: }; -class SmtCoreTestSuite : public CxxTest::TestSuite +class SearchTreeHandlerTestSuite : public CxxTest::TestSuite { public: - MockForSmtCore *mock; + MockForSearchTreeHandler *mock; MockEngine *engine; class MockConstraint : public PiecewiseLinearConstraint @@ -180,7 +180,7 @@ class SmtCoreTestSuite : public CxxTest::TestSuite void setUp() { - TS_ASSERT( mock = new MockForSmtCore ); + TS_ASSERT( mock = new MockForSearchTreeHandler ); TS_ASSERT( engine = new MockEngine ); } @@ -195,25 +195,25 @@ class SmtCoreTestSuite : public CxxTest::TestSuite ReluConstraint constraint1( 1, 2 ); ReluConstraint constraint2( 3, 4 ); - SmtCore smtCore( engine ); + SearchTreeHandler searchTreeHandler( engine ); for ( unsigned i = 0; i < (unsigned)Options::get()->getInt( Options::CONSTRAINT_VIOLATION_THRESHOLD ) - 1; ++i ) { - smtCore.reportViolatedConstraint( &constraint1 ); - TS_ASSERT( !smtCore.needToSplit() ); - smtCore.reportViolatedConstraint( &constraint2 ); - TS_ASSERT( !smtCore.needToSplit() ); + searchTreeHandler.reportViolatedConstraint( &constraint1 ); + TS_ASSERT( !searchTreeHandler.needToSplit() ); + searchTreeHandler.reportViolatedConstraint( &constraint2 ); + TS_ASSERT( !searchTreeHandler.needToSplit() ); } - smtCore.reportViolatedConstraint( &constraint2 ); - TS_ASSERT( smtCore.needToSplit() ); + searchTreeHandler.reportViolatedConstraint( &constraint2 ); + TS_ASSERT( searchTreeHandler.needToSplit() ); } void test_perform_split() { - SmtCore smtCore( engine ); + SearchTreeHandler 
searchTreeHandler( engine ); MockConstraint constraint; @@ -247,18 +247,18 @@ class SmtCoreTestSuite : public CxxTest::TestSuite for ( unsigned i = 0; i < (unsigned)Options::get()->getInt( Options::CONSTRAINT_VIOLATION_THRESHOLD ); ++i ) - smtCore.reportViolatedConstraint( &constraint ); + searchTreeHandler.reportViolatedConstraint( &constraint ); engine->lastStoredState = NULL; engine->lastRestoredState = NULL; - TS_ASSERT( smtCore.needToSplit() ); - TS_ASSERT_EQUALS( smtCore.getStackDepth(), 0U ); + TS_ASSERT( searchTreeHandler.needToSplit() ); + TS_ASSERT_EQUALS( searchTreeHandler.getStackDepth(), 0U ); TS_ASSERT( !constraint.setActiveWasCalled ); - TS_ASSERT_THROWS_NOTHING( smtCore.performSplit() ); + TS_ASSERT_THROWS_NOTHING( searchTreeHandler.performSplit() ); TS_ASSERT( constraint.setActiveWasCalled ); - TS_ASSERT( !smtCore.needToSplit() ); - TS_ASSERT_EQUALS( smtCore.getStackDepth(), 1U ); + TS_ASSERT( !searchTreeHandler.needToSplit() ); + TS_ASSERT_EQUALS( searchTreeHandler.getStackDepth(), 1U ); // Check that Split1 was performed and tableau state was stored TS_ASSERT_EQUALS( engine->lastLowerBounds.size(), 1U ); @@ -280,8 +280,8 @@ class SmtCoreTestSuite : public CxxTest::TestSuite // Pop Split1, check that the tableau was restored and that // a Split2 was performed - TS_ASSERT( smtCore.popSplit() ); - TS_ASSERT_EQUALS( smtCore.getStackDepth(), 1U ); + TS_ASSERT( searchTreeHandler.popSplit() ); + TS_ASSERT_EQUALS( searchTreeHandler.getStackDepth(), 1U ); TS_ASSERT_EQUALS( engine->lastRestoredState, originalState ); TS_ASSERT( !engine->lastStoredState ); @@ -304,8 +304,8 @@ class SmtCoreTestSuite : public CxxTest::TestSuite // Pop Split2, check that the tableau was restored and that // a Split3 was performed - TS_ASSERT( smtCore.popSplit() ); - TS_ASSERT_EQUALS( smtCore.getStackDepth(), 1U ); + TS_ASSERT( searchTreeHandler.popSplit() ); + TS_ASSERT_EQUALS( searchTreeHandler.getStackDepth(), 1U ); TS_ASSERT_EQUALS( engine->lastRestoredState, originalState 
); TS_ASSERT( !engine->lastStoredState ); @@ -324,14 +324,14 @@ class SmtCoreTestSuite : public CxxTest::TestSuite engine->lastEquations.clear(); // Final pop - TS_ASSERT( !smtCore.popSplit() ); + TS_ASSERT( !searchTreeHandler.popSplit() ); TS_ASSERT( !engine->lastRestoredState ); - TS_ASSERT_EQUALS( smtCore.getStackDepth(), 0U ); + TS_ASSERT_EQUALS( searchTreeHandler.getStackDepth(), 0U ); } void test_perform_split__inactive_constraint() { - SmtCore smtCore( engine ); + SearchTreeHandler searchTreeHandler( engine ); MockConstraint constraint; @@ -365,13 +365,13 @@ class SmtCoreTestSuite : public CxxTest::TestSuite for ( unsigned i = 0; i < (unsigned)Options::get()->getInt( Options::CONSTRAINT_VIOLATION_THRESHOLD ); ++i ) - smtCore.reportViolatedConstraint( &constraint ); + searchTreeHandler.reportViolatedConstraint( &constraint ); constraint.nextIsActive = false; - TS_ASSERT( smtCore.needToSplit() ); - TS_ASSERT_THROWS_NOTHING( smtCore.performSplit() ); - TS_ASSERT( !smtCore.needToSplit() ); + TS_ASSERT( searchTreeHandler.needToSplit() ); + TS_ASSERT_THROWS_NOTHING( searchTreeHandler.performSplit() ); + TS_ASSERT( !searchTreeHandler.needToSplit() ); // Check that no split was performed @@ -383,7 +383,7 @@ class SmtCoreTestSuite : public CxxTest::TestSuite void test_all_splits_so_far() { - SmtCore smtCore( engine ); + SearchTreeHandler searchTreeHandler( engine ); MockConstraint constraint; @@ -410,13 +410,13 @@ class SmtCoreTestSuite : public CxxTest::TestSuite for ( unsigned i = 0; i < (unsigned)Options::get()->getInt( Options::CONSTRAINT_VIOLATION_THRESHOLD ); ++i ) - smtCore.reportViolatedConstraint( &constraint ); + searchTreeHandler.reportViolatedConstraint( &constraint ); constraint.nextIsActive = true; - TS_ASSERT( smtCore.needToSplit() ); - TS_ASSERT_THROWS_NOTHING( smtCore.performSplit() ); - TS_ASSERT( !smtCore.needToSplit() ); + TS_ASSERT( searchTreeHandler.needToSplit() ); + TS_ASSERT_THROWS_NOTHING( searchTreeHandler.performSplit() ); + TS_ASSERT( 
!searchTreeHandler.needToSplit() ); // Register a valid split @@ -424,7 +424,7 @@ class SmtCoreTestSuite : public CxxTest::TestSuite PiecewiseLinearCaseSplit split3; Tightening bound5( 14, 2.3, Tightening::LB ); - TS_ASSERT_THROWS_NOTHING( smtCore.recordImpliedValidSplit( split3 ) ); + TS_ASSERT_THROWS_NOTHING( searchTreeHandler.recordImpliedValidSplit( split3 ) ); // Do another real split @@ -445,17 +445,17 @@ class SmtCoreTestSuite : public CxxTest::TestSuite for ( unsigned i = 0; i < (unsigned)Options::get()->getInt( Options::CONSTRAINT_VIOLATION_THRESHOLD ); ++i ) - smtCore.reportViolatedConstraint( &constraint2 ); + searchTreeHandler.reportViolatedConstraint( &constraint2 ); constraint2.nextIsActive = true; - TS_ASSERT( smtCore.needToSplit() ); - TS_ASSERT_THROWS_NOTHING( smtCore.performSplit() ); - TS_ASSERT( !smtCore.needToSplit() ); + TS_ASSERT( searchTreeHandler.needToSplit() ); + TS_ASSERT_THROWS_NOTHING( searchTreeHandler.performSplit() ); + TS_ASSERT( !searchTreeHandler.needToSplit() ); // Check that everything is received in the correct order List allSplitsSoFar; - TS_ASSERT_THROWS_NOTHING( smtCore.allSplitsSoFar( allSplitsSoFar ) ); + TS_ASSERT_THROWS_NOTHING( searchTreeHandler.allSplitsSoFar( allSplitsSoFar ) ); TS_ASSERT_EQUALS( allSplitsSoFar.size(), 3U ); @@ -469,7 +469,7 @@ class SmtCoreTestSuite : public CxxTest::TestSuite TS_ASSERT_EQUALS( *it, split4 ); } - void test_store_smt_state() + void test_store_search_tree_state() { // ReLU(x0, x1) // ReLU(x2, x3) @@ -484,89 +484,89 @@ class SmtCoreTestSuite : public CxxTest::TestSuite relu1.transformToUseAuxVariables( inputQuery ); relu2.transformToUseAuxVariables( inputQuery ); - SmtCore smtCore( engine ); + SearchTreeHandler searchTreeHandler( engine ); PiecewiseLinearCaseSplit split1; Tightening bound1( 1, 0.5, Tightening::LB ); split1.storeBoundTightening( bound1 ); - TS_ASSERT_THROWS_NOTHING( smtCore.recordImpliedValidSplit( split1 ) ); + TS_ASSERT_THROWS_NOTHING( 
searchTreeHandler.recordImpliedValidSplit( split1 ) ); for ( unsigned i = 0; i < (unsigned)Options::get()->getInt( Options::CONSTRAINT_VIOLATION_THRESHOLD ); ++i ) - smtCore.reportViolatedConstraint( &relu1 ); + searchTreeHandler.reportViolatedConstraint( &relu1 ); - TS_ASSERT( smtCore.needToSplit() ); - TS_ASSERT_THROWS_NOTHING( smtCore.performSplit() ); - TS_ASSERT( !smtCore.needToSplit() ); + TS_ASSERT( searchTreeHandler.needToSplit() ); + TS_ASSERT_THROWS_NOTHING( searchTreeHandler.performSplit() ); + TS_ASSERT( !searchTreeHandler.needToSplit() ); PiecewiseLinearCaseSplit split2; Tightening bound2( 3, 2.3, Tightening::UB ); split2.storeBoundTightening( bound2 ); - TS_ASSERT_THROWS_NOTHING( smtCore.recordImpliedValidSplit( split2 ) ); + TS_ASSERT_THROWS_NOTHING( searchTreeHandler.recordImpliedValidSplit( split2 ) ); for ( unsigned i = 0; i < (unsigned)Options::get()->getInt( Options::CONSTRAINT_VIOLATION_THRESHOLD ); ++i ) - smtCore.reportViolatedConstraint( &relu2 ); + searchTreeHandler.reportViolatedConstraint( &relu2 ); - TS_ASSERT( smtCore.needToSplit() ); - TS_ASSERT_THROWS_NOTHING( smtCore.performSplit() ); - TS_ASSERT( !smtCore.needToSplit() ); + TS_ASSERT( searchTreeHandler.needToSplit() ); + TS_ASSERT_THROWS_NOTHING( searchTreeHandler.performSplit() ); + TS_ASSERT( !searchTreeHandler.needToSplit() ); - SmtState smtState; - smtCore.storeSmtState( smtState ); - TS_ASSERT( smtState._impliedValidSplitsAtRoot.size() == 1 ); - TS_ASSERT( *smtState._impliedValidSplitsAtRoot.begin() == split1 ); + SearchTreeState searchTreeState; + searchTreeHandler.storeSearchTreeState( searchTreeState ); + TS_ASSERT( searchTreeState._impliedValidSplitsAtRoot.size() == 1 ); + TS_ASSERT( *searchTreeState._impliedValidSplitsAtRoot.begin() == split1 ); - TS_ASSERT( smtState._stack.size() == 2 ); + TS_ASSERT( searchTreeState._stack.size() == 2 ); // Examine the first stackEntry - SmtStackEntry *stackEntry = *( smtState._stack.begin() ); + SearchTreeStackEntry *stackEntry = *( 
searchTreeState._stack.begin() ); TS_ASSERT( stackEntry->_activeSplit == *( relu1.getCaseSplits().begin() ) ); TS_ASSERT( *( stackEntry->_alternativeSplits.begin() ) == *( ++relu1.getCaseSplits().begin() ) ); TS_ASSERT( stackEntry->_impliedValidSplits.size() == 1 ); TS_ASSERT( *( stackEntry->_impliedValidSplits.begin() ) == split2 ); // Examine the second stackEntry - stackEntry = *( ++smtState._stack.begin() ); + stackEntry = *( ++searchTreeState._stack.begin() ); TS_ASSERT( stackEntry->_activeSplit == *( relu2.getCaseSplits().begin() ) ); TS_ASSERT( *( stackEntry->_alternativeSplits.begin() ) == *( ++relu2.getCaseSplits().begin() ) ); TS_ASSERT( stackEntry->_impliedValidSplits.size() == 0 ); - clearSmtState( smtState ); + clearSearchTreeState( searchTreeState ); - TS_ASSERT_THROWS_NOTHING( smtCore.popSplit() ); + TS_ASSERT_THROWS_NOTHING( searchTreeHandler.popSplit() ); - smtCore.storeSmtState( smtState ); - TS_ASSERT( smtState._impliedValidSplitsAtRoot.size() == 1 ); - TS_ASSERT( *smtState._impliedValidSplitsAtRoot.begin() == split1 ); + searchTreeHandler.storeSearchTreeState( searchTreeState ); + TS_ASSERT( searchTreeState._impliedValidSplitsAtRoot.size() == 1 ); + TS_ASSERT( *searchTreeState._impliedValidSplitsAtRoot.begin() == split1 ); - TS_ASSERT( smtState._stack.size() == 2 ); + TS_ASSERT( searchTreeState._stack.size() == 2 ); // Examine the first stackEntry - stackEntry = *( smtState._stack.begin() ); + stackEntry = *( searchTreeState._stack.begin() ); TS_ASSERT( stackEntry->_activeSplit == *( relu1.getCaseSplits().begin() ) ); TS_ASSERT( *( stackEntry->_alternativeSplits.begin() ) == *( ++relu1.getCaseSplits().begin() ) ); TS_ASSERT( stackEntry->_impliedValidSplits.size() == 1 ); TS_ASSERT( *( stackEntry->_impliedValidSplits.begin() ) == split2 ); // Examine the second stackEntry - stackEntry = *( ++smtState._stack.begin() ); + stackEntry = *( ++searchTreeState._stack.begin() ); TS_ASSERT( stackEntry->_activeSplit == *( ++relu2.getCaseSplits().begin() ) 
); TS_ASSERT( stackEntry->_alternativeSplits.empty() ); TS_ASSERT( stackEntry->_impliedValidSplits.size() == 0 ); - clearSmtState( smtState ); + clearSearchTreeState( searchTreeState ); - TS_ASSERT_THROWS_NOTHING( smtCore.popSplit() ); + TS_ASSERT_THROWS_NOTHING( searchTreeHandler.popSplit() ); } - void clearSmtState( SmtState &smtState ) + void clearSearchTreeState( SearchTreeState &searchTreeState ) { - for ( const auto &stackEntry : smtState._stack ) + for ( const auto &stackEntry : searchTreeState._stack ) delete stackEntry; - smtState._stack = List(); - smtState._impliedValidSplitsAtRoot = List(); + searchTreeState._stack = List(); + searchTreeState._impliedValidSplitsAtRoot = List(); } void test_todo() diff --git a/src/proofs/Checker.cpp b/src/proofs/Checker.cpp index c153de7feb..afc2e98a3e 100644 --- a/src/proofs/Checker.cpp +++ b/src/proofs/Checker.cpp @@ -211,6 +211,12 @@ bool Checker::checkAllPLCExplanations( const UnsatCertificateNode *node, double // NOTE, this will change as PLCLemma will for ( const auto &plcLemma : node->getPLCLemmas() ) { + if ( !plcLemma ) + continue; + + DEBUG( + ASSERT( !GlobalConfiguration::ANALYZE_PROOF_DEPENDENCIES || plcLemma->getToCheck() ) ); + PiecewiseLinearConstraint *matchedConstraint = NULL; Tightening::BoundType affectedVarBound = plcLemma->getAffectedVarBound(); double explainedBound = affectedVarBound == Tightening::UB ? 
FloatUtils::infinity() diff --git a/src/proofs/PlcLemma.cpp b/src/proofs/PlcLemma.cpp index c4edcca92d..796f90b1a9 100644 --- a/src/proofs/PlcLemma.cpp +++ b/src/proofs/PlcLemma.cpp @@ -14,19 +14,24 @@ #include "PlcLemma.h" +#include "Debug.h" + PLCLemma::PLCLemma( const List &causingVars, unsigned affectedVar, double bound, Tightening::BoundType causingVarBound, Tightening::BoundType affectedVarBound, const Vector &explanations, - PiecewiseLinearFunctionType constraintType ) + PiecewiseLinearFunctionType constraintType, + double minTargetBound ) : _causingVars( causingVars ) , _affectedVar( affectedVar ) , _bound( bound ) , _causingVarBound( causingVarBound ) , _affectedVarBound( affectedVarBound ) , _constraintType( constraintType ) + , _toCheck( false ) + , _minTargetBound( minTargetBound ) { if ( explanations.empty() ) _explanations = List(); @@ -96,3 +101,18 @@ PiecewiseLinearFunctionType PLCLemma::getConstraintType() const { return _constraintType; } + +bool PLCLemma::getToCheck() const +{ + return _toCheck; +} + +double PLCLemma::getMinTargetBound() const +{ + return _minTargetBound; +} + +void PLCLemma::setToCheck() +{ + _toCheck = true; +} diff --git a/src/proofs/PlcLemma.h b/src/proofs/PlcLemma.h index 909752489e..10884888bb 100644 --- a/src/proofs/PlcLemma.h +++ b/src/proofs/PlcLemma.h @@ -15,7 +15,6 @@ #ifndef __PlcExplanation_h__ #define __PlcExplanation_h__ -#include "PiecewiseLinearConstraint.h" #include "PiecewiseLinearFunctionType.h" #include "SparseUnsortedList.h" #include "Tightening.h" @@ -33,7 +32,8 @@ class PLCLemma Tightening::BoundType causingVarBound, Tightening::BoundType affectedVarBound, const Vector &explanation, - PiecewiseLinearFunctionType constraintType ); + PiecewiseLinearFunctionType constraintType, + double minTargetBound ); ~PLCLemma(); @@ -47,6 +47,10 @@ class PLCLemma Tightening::BoundType getAffectedVarBound() const; const List &getExplanations() const; PiecewiseLinearFunctionType getConstraintType() const; + bool 
getToCheck() const; + double getMinTargetBound() const; + + void setToCheck(); private: const List _causingVars; @@ -56,6 +60,8 @@ class PLCLemma Tightening::BoundType _affectedVarBound; List _explanations; PiecewiseLinearFunctionType _constraintType; + bool _toCheck; + double _minTargetBound; }; #endif //__PlcExplanation_h__ diff --git a/src/proofs/UnsatCertificateNode.cpp b/src/proofs/UnsatCertificateNode.cpp index 2d1c094509..f42c7f3263 100644 --- a/src/proofs/UnsatCertificateNode.cpp +++ b/src/proofs/UnsatCertificateNode.cpp @@ -158,3 +158,11 @@ bool UnsatCertificateNode::isValidNonLeaf() const { return !_contradiction && !_children.empty(); } + +void UnsatCertificateNode::deleteUnusedLemmas() +{ + if ( GlobalConfiguration::ANALYZE_PROOF_DEPENDENCIES ) + for ( auto &lemma : _PLCExplanations ) + if ( lemma && !lemma->getToCheck() ) + lemma = nullptr; +} diff --git a/src/proofs/UnsatCertificateNode.h b/src/proofs/UnsatCertificateNode.h index 42d5048086..9963f85f8c 100644 --- a/src/proofs/UnsatCertificateNode.h +++ b/src/proofs/UnsatCertificateNode.h @@ -128,6 +128,11 @@ class UnsatCertificateNode */ bool isValidNonLeaf() const; + /* + Deletes all unused lemmas + */ + void deleteUnusedLemmas(); + private: List _children; UnsatCertificateNode *_parent; diff --git a/src/proofs/UnsatCertificateUtils.cpp b/src/proofs/UnsatCertificateUtils.cpp index 88f4569451..f540859629 100644 --- a/src/proofs/UnsatCertificateUtils.cpp +++ b/src/proofs/UnsatCertificateUtils.cpp @@ -24,9 +24,6 @@ double UNSATCertificateUtils::computeBound( unsigned var, { ASSERT( var < numberOfVariables ); - double derivedBound = 0; - double temp; - if ( explanation.empty() ) return isUpper ? 
groundUpperBounds[var] : groundLowerBounds[var]; @@ -47,31 +44,17 @@ double UNSATCertificateUtils::computeBound( unsigned var, UNSATCertificateUtils::getExplanationRowCombination( var, explanation, explanationRowCombination, initialTableau, numberOfVariables ); - // Set the bound derived from the linear combination, using original bounds. - for ( unsigned i = 0; i < numberOfVariables; ++i ) - { - temp = explanationRowCombination[i]; - if ( !FloatUtils::isZero( temp ) ) - { - if ( isUpper ) - temp *= FloatUtils::isPositive( explanationRowCombination[i] ) - ? groundUpperBounds[i] - : groundLowerBounds[i]; - else - temp *= FloatUtils::isPositive( explanationRowCombination[i] ) - ? groundLowerBounds[i] - : groundUpperBounds[i]; - - if ( !FloatUtils::isZero( temp ) ) - derivedBound += temp; - } - } - - return derivedBound; + return isUpper ? computeCombinationUpperBound( explanationRowCombination, + groundUpperBounds, + groundLowerBounds, + numberOfVariables ) + : computeCombinationLowerBound( explanationRowCombination, + groundUpperBounds, + groundLowerBounds, + numberOfVariables ); } -void UNSATCertificateUtils::getExplanationRowCombination( unsigned var, - const SparseUnsortedList &explanation, +void UNSATCertificateUtils::getExplanationRowCombination( const SparseUnsortedList &explanation, Vector &explanationRowCombination, const SparseMatrix *initialTableau, unsigned numberOfVariables ) @@ -100,7 +83,16 @@ void UNSATCertificateUtils::getExplanationRowCombination( unsigned var, if ( FloatUtils::isZero( explanationRowCombination[i] ) ) explanationRowCombination[i] = 0; } +} +void UNSATCertificateUtils::getExplanationRowCombination( unsigned var, + const SparseUnsortedList &explanation, + Vector &explanationRowCombination, + const SparseMatrix *initialTableau, + unsigned numberOfVariables ) +{ + UNSATCertificateUtils::getExplanationRowCombination( + explanation, explanationRowCombination, initialTableau, numberOfVariables ); // Since: 0 = Sum (ci * xi) + c * var = Sum 
(ci * xi) + (c + 1) * var - var // We have: var = Sum (ci * xi) + (c + 1) * var ++explanationRowCombination[var]; @@ -131,17 +123,31 @@ double UNSATCertificateUtils::computeCombinationUpperBound( const SparseUnsorted } } + return computeCombinationUpperBound( + explanationRowCombination, groundUpperBounds, groundLowerBounds, numberOfVariables ); +} + +const Set UNSATCertificateUtils::getSupportedActivations() +{ + return { RELU, SIGN, ABSOLUTE_VALUE, MAX, DISJUNCTION, LEAKY_RELU }; +} + +double UNSATCertificateUtils::computeCombinationUpperBound( const Vector &combination, + const double *groundUpperBounds, + const double *groundLowerBounds, + unsigned numberOfVariables ) +{ double derivedBound = 0; double temp; // Set the bound derived from the linear combination, using original bounds. for ( unsigned i = 0; i < numberOfVariables; ++i ) { - temp = explanationRowCombination[i]; + temp = combination[i]; if ( !FloatUtils::isZero( temp ) ) { - temp *= FloatUtils::isPositive( explanationRowCombination[i] ) ? groundUpperBounds[i] - : groundLowerBounds[i]; + temp *= FloatUtils::isPositive( combination[i] ) ? groundUpperBounds[i] + : groundLowerBounds[i]; if ( !FloatUtils::isZero( temp ) ) derivedBound += temp; @@ -151,7 +157,27 @@ double UNSATCertificateUtils::computeCombinationUpperBound( const SparseUnsorted return derivedBound; } -const Set UNSATCertificateUtils::getSupportedActivations() +double UNSATCertificateUtils::computeCombinationLowerBound( const Vector &combination, + const double *groundUpperBounds, + const double *groundLowerBounds, + unsigned numberOfVariables ) { - return { RELU, SIGN, ABSOLUTE_VALUE, MAX, DISJUNCTION, LEAKY_RELU }; + double derivedBound = 0; + double temp; + + // Set the bound derived from the linear combination, using original bounds. + for ( unsigned i = 0; i < numberOfVariables; ++i ) + { + temp = combination[i]; + if ( !FloatUtils::isZero( temp ) ) + { + temp *= FloatUtils::isNegative( combination[i] ) ? 
groundUpperBounds[i] + : groundLowerBounds[i]; + + if ( !FloatUtils::isZero( temp ) ) + derivedBound += temp; + } + } + + return derivedBound; } diff --git a/src/proofs/UnsatCertificateUtils.h b/src/proofs/UnsatCertificateUtils.h index c599758c7c..39446e3638 100644 --- a/src/proofs/UnsatCertificateUtils.h +++ b/src/proofs/UnsatCertificateUtils.h @@ -36,6 +36,13 @@ class UNSATCertificateUtils const double *groundLowerBounds, unsigned numberOfVariables ); + /* + Given a tableau and a column vector, create a linear combination based on the vector + */ + static void getExplanationRowCombination( const SparseUnsortedList &explanation, + Vector &explanationRowCombination, + const SparseMatrix *initialTableau, + unsigned numberOfVariables ); /* Given a var, a tableau and a column vector, create a linear combination used to explain a bound @@ -52,6 +59,16 @@ class UNSATCertificateUtils const double *groundLowerBounds, unsigned numberOfVariables ); + static double computeCombinationUpperBound( const Vector &combination, + const double *groundUpperBounds, + const double *groundLowerBounds, + unsigned numberOfVariables ); + + static double computeCombinationLowerBound( const Vector &combination, + const double *groundUpperBounds, + const double *groundLowerBounds, + unsigned numberOfVariables ); + static const Set getSupportedActivations(); }; diff --git a/src/proofs/tests/Test_UnsatCertificateNode.h b/src/proofs/tests/Test_UnsatCertificateNode.h index 75a1d250a2..ca18bc8608 100644 --- a/src/proofs/tests/Test_UnsatCertificateNode.h +++ b/src/proofs/tests/Test_UnsatCertificateNode.h @@ -83,11 +83,11 @@ class UnsatCertificateNodeTestSuite : public CxxTest::TestSuite Vector emptyVec; auto explanation1 = std::shared_ptr( - new PLCLemma( { 1 }, 1, 0, Tightening::UB, Tightening::UB, emptyVec, RELU ) ); + new PLCLemma( { 1 }, 1, 0, Tightening::UB, Tightening::UB, emptyVec, RELU, 0 ) ); auto explanation2 = std::shared_ptr( - new PLCLemma( { 1 }, 1, -1, Tightening::UB, Tightening::UB, 
emptyVec, RELU ) ); + new PLCLemma( { 1 }, 1, -1, Tightening::UB, Tightening::UB, emptyVec, RELU, 0 ) ); auto explanation3 = std::shared_ptr( - new PLCLemma( { 1 }, 1, -4, Tightening::UB, Tightening::UB, emptyVec, RELU ) ); + new PLCLemma( { 1 }, 1, -4, Tightening::UB, Tightening::UB, emptyVec, RELU, 0 ) ); TS_ASSERT( root.getPLCLemmas().empty() );