diff --git a/src/arith_uint256.cpp b/src/arith_uint256.cpp
index 8f7576e2..6abeb4fe 100755
--- a/src/arith_uint256.cpp
+++ b/src/arith_uint256.cpp
@@ -198,7 +198,7 @@ unsigned int base_uint<BITS>::bits() const
     for (int pos = WIDTH - 1; pos >= 0; pos--) {
         if (pn[pos]) {
             for (int nbits = 31; nbits > 0; nbits--) {
-                if (pn[pos] & 1 << nbits)
+                if (pn[pos] & 1U << nbits) // 1U instead of 1 to avoid shifting a signed 32-bit value by 31 bits
                     return 32 * pos + nbits + 1;
             }
             return 32 * pos + 1;
diff --git a/src/arith_uint256.h b/src/arith_uint256.h
index 5a694fd4..6587b3b1 100755
--- a/src/arith_uint256.h
+++ b/src/arith_uint256.h
@@ -83,7 +83,7 @@ class base_uint
         base_uint ret;
         for (int i = 0; i < WIDTH; i++)
             ret.pn[i] = ~pn[i];
-        ret++;
+        ++ret; // no need to use post-increment when pre-increment is available for this class
         return ret;
     }
@@ -177,7 +177,7 @@ class base_uint
     {
         // prefix operator
         int i = 0;
-        while (++pn[i] == 0 && i < WIDTH - 1)
+        while (i < WIDTH - 1 && ++pn[i] == 0) // don't use i as an index before checking we are within limits
            i++;
        return *this;
     }
@@ -194,7 +194,7 @@ class base_uint
     {
         // prefix operator
         int i = 0;
-        while (--pn[i] == (uint32_t)-1 && i < WIDTH - 1)
+        while (i < WIDTH - 1 && --pn[i] == (uint32_t)-1) // don't use i as an index before checking we are within limits
            i++;
        return *this;
     }
diff --git a/src/core_memusage.h b/src/core_memusage.h
index 9e840dcf..6340d4df 100644
--- a/src/core_memusage.h
+++ b/src/core_memusage.h
@@ -29,7 +29,7 @@ static inline size_t RecursiveDynamicUsage(const CTxOut& out) {
 
 static inline size_t RecursiveDynamicUsage(const CScriptWitness& scriptWit) {
     size_t mem = memusage::DynamicUsage(scriptWit.stack);
-    for (std::vector<std::vector<unsigned char> >::const_iterator it = scriptWit.stack.begin(); it != scriptWit.stack.end(); it++) {
+    for (std::vector<std::vector<unsigned char> >::const_iterator it = scriptWit.stack.begin(); it != scriptWit.stack.end(); ++it) { // no need to use post-increment when pre-increment is available
         mem += memusage::DynamicUsage(*it);
     }
     return mem;
@@ -41,7 +41,7 @@ static inline size_t RecursiveDynamicUsage(const CTxInWitness& txinwit) {
 
 static inline size_t RecursiveDynamicUsage(const CTxWitness& txwit) {
     size_t mem = memusage::DynamicUsage(txwit.vtxinwit);
-    for (std::vector<CTxInWitness>::const_iterator it = txwit.vtxinwit.begin(); it != txwit.vtxinwit.end(); it++) {
+    for (std::vector<CTxInWitness>::const_iterator it = txwit.vtxinwit.begin(); it != txwit.vtxinwit.end(); ++it) { // no need to use post-increment when pre-increment is available
         mem += RecursiveDynamicUsage(*it);
     }
     return mem;
@@ -49,10 +49,10 @@ static inline size_t RecursiveDynamicUsage(const CTxWitness& txwit) {
 
 static inline size_t RecursiveDynamicUsage(const CTransaction& tx) {
     size_t mem = memusage::DynamicUsage(tx.vin) + memusage::DynamicUsage(tx.vout) + RecursiveDynamicUsage(tx.wit);
-    for (std::vector<CTxIn>::const_iterator it = tx.vin.begin(); it != tx.vin.end(); it++) {
+    for (std::vector<CTxIn>::const_iterator it = tx.vin.begin(); it != tx.vin.end(); ++it) { // no need to use post-increment when pre-increment is available
         mem += RecursiveDynamicUsage(*it);
     }
-    for (std::vector<CTxOut>::const_iterator it = tx.vout.begin(); it != tx.vout.end(); it++) {
+    for (std::vector<CTxOut>::const_iterator it = tx.vout.begin(); it != tx.vout.end(); ++it) { // no need to use post-increment when pre-increment is available
         mem += RecursiveDynamicUsage(*it);
     }
     return mem;
@@ -60,10 +60,10 @@ static inline size_t RecursiveDynamicUsage(const CTransaction& tx) {
 
 static inline size_t RecursiveDynamicUsage(const CMutableTransaction& tx) {
     size_t mem = memusage::DynamicUsage(tx.vin) + memusage::DynamicUsage(tx.vout) + RecursiveDynamicUsage(tx.wit);
-    for (std::vector<CTxIn>::const_iterator it = tx.vin.begin(); it != tx.vin.end(); it++) {
+    for (std::vector<CTxIn>::const_iterator it = tx.vin.begin(); it != tx.vin.end(); ++it) { // no need to use post-increment when pre-increment is available
         mem += RecursiveDynamicUsage(*it);
     }
-    for (std::vector<CTxOut>::const_iterator it = tx.vout.begin(); it != tx.vout.end(); it++) {
+    for (std::vector<CTxOut>::const_iterator it = tx.vout.begin(); it != tx.vout.end(); ++it) { // no need to use post-increment when pre-increment is available
         mem += RecursiveDynamicUsage(*it);
     }
     return mem;
@@ -71,7 +71,7 @@ static inline size_t RecursiveDynamicUsage(const CMutableTransaction& tx) {
 
 static inline size_t RecursiveDynamicUsage(const CBlock& block) {
     size_t mem = memusage::DynamicUsage(block.vtx);
-    for (std::vector<CTransaction>::const_iterator it = block.vtx.begin(); it != block.vtx.end(); it++) {
+    for (std::vector<CTransaction>::const_iterator it = block.vtx.begin(); it != block.vtx.end(); ++it) { // no need to use post-increment when pre-increment is available
         mem += RecursiveDynamicUsage(*it);
     }
     return mem;
diff --git a/src/crypto/lyra2/Lyra2.c b/src/crypto/lyra2/Lyra2.c
index 46ff09b5..07f9b466 100644
--- a/src/crypto/lyra2/Lyra2.c
+++ b/src/crypto/lyra2/Lyra2.c
@@ -76,6 +76,7 @@ int LYRA2(void *K, int64_t kLen, const void *pwd, int32_t pwdlen, const void *sa
     //Allocates pointers to each row of the matrix
     uint64_t **memMatrix = malloc(sizeof(uint64_t*) * nRows);
     if (memMatrix == NULL) {
+        free(wholeMatrix); // free wholeMatrix to avoid a memory leak on this error path
         return -1;
     }
     //Places the pointers in the correct positions
diff --git a/src/cryptopp/validat1.cpp b/src/cryptopp/validat1.cpp
index 16f82bd3..8f416a69 100644
--- a/src/cryptopp/validat1.cpp
+++ b/src/cryptopp/validat1.cpp
@@ -1071,7 +1071,7 @@ bool TestNIST_DRBG()
        "\xF2\x86\xE4\xED\x74\xF2\x5D\x8B\x6C\x4D\xB8\xDE\xD8\x4A\xD6\x5E\xD6\x6D\xAE\xB1"
        "\x1B\xA2\x94\x52\x54\xAD\x3C\x3D\x25\xBD\x12\x46\x3C\xA0\x45\x9D";
 
-    fail = !!memcmp(result, expected, 2048/8);
+    fail = !!memcmp(result, expected, sizeof(expected)); // stop memcmp from reading out of bounds
     pass = !fail && pass;
     cout << (fail ? "FAILED " : "passed ") << "Hash_DRBG SHA512/256/888 (C0UNT=0, E=32, N=16, A=32, P=32)" << endl;
diff --git a/src/primitives/block.h b/src/primitives/block.h
index 1fe23b3e..bd7a7383 100755
--- a/src/primitives/block.h
+++ b/src/primitives/block.h
@@ -106,7 +106,7 @@ class CBlock : public CBlockHeader
     CBlock(const CBlockHeader &header)
     {
         SetNull();
-        *((CBlockHeader*)this) = header;
+        *(static_cast<CBlockHeader*>(this)) = header; // don't use C-style cast
     }
 
     ADD_SERIALIZE_METHODS;
diff --git a/src/qt/addtokenpage.cpp b/src/qt/addtokenpage.cpp
index 222a4375..389eab28 100755
--- a/src/qt/addtokenpage.cpp
+++ b/src/qt/addtokenpage.cpp
@@ -147,6 +147,7 @@ void AddTokenPage::on_confirmButton_clicked()
             }
         }
     }
+    delete addToHex; // fix a memory leak: addToHex is never freed
 }
 
 void AddTokenPage::on_addressChanged()
@@ -177,6 +178,7 @@ void AddTokenPage::on_addressChanged()
         m_validTokenAddress = ret;
     }
     ui->confirmButton->setEnabled(m_validTokenAddress);
+    delete addToHex; // fix a memory leak: addToHex is never freed
 }
 
 void AddTokenPage::on_numBlocksChanged(int newHeight)
diff --git a/src/qt/hexaddressconverter.cpp b/src/qt/hexaddressconverter.cpp
index 7675b824..ca87c2ed 100644
--- a/src/qt/hexaddressconverter.cpp
+++ b/src/qt/hexaddressconverter.cpp
@@ -46,6 +46,7 @@ void HexAddressConverter::addressChanged(const QString& address) {
     if(!isAddressValid) ui->resultLabel->setText("");
     ui->addressEdit->setValid(isAddressValid);
+    delete addToHex; // fix a memory leak: addToHex is never freed
 }
 
 void HexAddressConverter::copyButtonClicked() {
diff --git a/src/uint256.cpp b/src/uint256.cpp
index 42cef1eb..bd41a61d 100755
--- a/src/uint256.cpp
+++ b/src/uint256.cpp
@@ -216,7 +216,7 @@ unsigned int base_uint<BITS>::bits() const
     for (int pos = WIDTH - 1; pos >= 0; pos--) {
         if (pn[pos]) {
             for (int bits = 31; bits > 0; bits--) {
-                if (pn[pos] & 1 << bits)
+                if (pn[pos] & 1U << bits) // 1U instead of 1 to avoid shifting a signed 32-bit value by 31 bits
                     return 32 * pos + bits + 1;
             }
             return 32 * pos + 1;
diff --git a/src/uint256.h b/src/uint256.h
index 0acf7b6b..014558d3 100755
--- a/src/uint256.h
+++ b/src/uint256.h
@@ -98,7 +98,7 @@ class base_uint
         base_uint ret;
         for (int i = 0; i < WIDTH; i++)
             ret.pn[i] = ~pn[i];
-        ret++;
+        ++ret; // no need to use post-increment when pre-increment is available for this class
        return ret;
     }