diff --git a/blockchain/block.go b/blockchain/block.go
index d5457ce66f417ed9dc42c973392af3a6812925f4..6723f602e4521ca492bd1fd8baf789b91aa8a89b 100644
--- a/blockchain/block.go
+++ b/blockchain/block.go
@@ -78,9 +78,9 @@ func (b *Block) NextBlock() (*Block, error) {
 // GetCreated returns a copy of the created coins list
 func (b *Block) GetCreated() []coin.Coin {
 	b.mutex.Lock()
+	defer b.mutex.Unlock()
 	cCopy := make([]coin.Coin, len(b.created))
 	copy(cCopy, b.created)
-	b.mutex.Unlock()
 	return cCopy
 }
 
@@ -102,9 +102,9 @@ func (b *Block) AddCreated(c []coin.Coin) error {
 // GetDestroyed returns a copy of the destroyed coins list
 func (b *Block) GetDestroyed() []coin.Coin {
 	b.mutex.Lock()
+	defer b.mutex.Unlock()
 	cCopy := make([]coin.Coin, len(b.destroyed))
 	copy(cCopy, b.destroyed)
-	b.mutex.Unlock()
 	return cCopy
 }
 
@@ -139,8 +139,8 @@ func (b *Block) GetHash() (BlockHash, error) {
 func (b *Block) GetPreviousHash() BlockHash {
 	var rtnBH BlockHash
 	b.mutex.Lock()
+	defer b.mutex.Unlock()
 	copy(rtnBH[:], b.previousHash[:])
-	b.mutex.Unlock()
 	return rtnBH
 }
 
@@ -148,7 +148,7 @@ func (b *Block) GetPreviousHash() BlockHash {
 func (b *Block) GetLifecycle() BlockLifecycle {
 	b.mutex.Lock()
 	blc := b.lifecycle
-	b.mutex.Unlock()
+	defer b.mutex.Unlock()
 	return blc
 }
 
@@ -276,7 +276,7 @@ func Deserialize(sBlock []byte) (*Block, error) {
 	b.id = sb.ID
 	b.lifecycle = Baked
 
-	b.mutex.Unlock()
+	defer b.mutex.Unlock()
 	return &b, nil
 }
 
diff --git a/diffieHellman/dhkx.go b/diffieHellman/dhkx.go
index 732caadac4c81fb5533676e6ebe3f784afd26e49..27c55d9b5a874c627acf4975528048b2d8e4376e 100644
--- a/diffieHellman/dhkx.go
+++ b/diffieHellman/dhkx.go
@@ -63,7 +63,7 @@ func CheckPublicKey(group *cyclic.Group, publicKey *cyclic.Int) bool {
 	// Definition of the upper bound to p-1
 	upperBound := group.GetPSub1Cyclic()
 
-	//Cmp returns -1 if number is smaller, 0 if the same and 1 if bigger than.
+	// Cmp returns -1 if number is smaller, 0 if the same and 1 if bigger than.
 	x := publicKey.Cmp(lowerBound)
 	y := publicKey.Cmp(upperBound)
 
diff --git a/fastRNG/stream.go b/fastRNG/stream.go
index b0d4bb9094be8c9756331316524633e6dc61e2ca..e3d5ef8c29c96bef1652196b2094b8b6b1f584e9 100644
--- a/fastRNG/stream.go
+++ b/fastRNG/stream.go
@@ -38,7 +38,7 @@ type Stream struct {
 	rng         csprng.Source
 	source      []byte
 	numStream   uint
-	mut         sync.Mutex
+	mutex       sync.Mutex
 	fortunaHash hash.Hash
 }
 
@@ -81,21 +81,21 @@ func (sg *StreamGenerator) newStream() *Stream {
 // If the # of open streams exceeds streamCount,
 // this function blocks (and prints a log warning) until a stream is available
 func (sg *StreamGenerator) GetStream() *Stream {
-	//Initialize a stream
 	var retStream *Stream
-	//If there is a stream waiting to be used, take that from the channel and return in
+
+	// If there is a stream waiting to be used, take that from the channel and return in
 	select {
 	case retStream = <-sg.waitingStreams:
 	default:
 	}
 
-	//If there was no waiting channels, ie we exited the select statement
+	// If there was no waiting channels, ie we exited the select statement
 	if retStream == nil {
-		//If we have not reached the maximum amount of streams (specified by streamCount), then create a new one
+		// If we have not reached the maximum amount of streams (specified by streamCount), then create a new one
 		if sg.numStreams < sg.maxStreams {
 			retStream = sg.newStream()
 		} else {
-			//Else block until a stream is put in the waiting channel
+			// Else block until a stream is put in the waiting channel
 			retStream = <-sg.waitingStreams
 		}
 	}
@@ -112,9 +112,10 @@ func (sg *StreamGenerator) Close(stream *Stream) {
 // BlockSize into AES then run it until blockSize*scalingFactor bytes are read. Every time
 // BlockSize*scalingFactor bytes are read this functions blocks until it rereads csprng.Source.
 func (s *Stream) Read(b []byte) (int, error) {
-	s.mut.Lock()
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+
 	if len(b)%aes.BlockSize != 0 {
-		s.mut.Unlock()
 		return 0, errors.New("requested read length is not byte aligned")
 	}
 
@@ -124,19 +125,18 @@ func (s *Stream) Read(b []byte) (int, error) {
 	counter := make([]byte, aes.BlockSize)
 	count := uint64(0)
 	for block := 0; block < len(b)/aes.BlockSize; block++ {
-		//Little endian used as a straightforward way to increment a byte array
+		// Little endian used as a straightforward way to increment a byte array
 		count++
 		binary.LittleEndian.PutUint64(counter, count)
 
 		var extension []byte
-		//Decrease the entropy count
+		// Decrease the entropy count
 		s.entropyCnt--
-		//If entropyCnt is decreased too far, add an extension and set the entropyCnt
+		// If entropyCnt is decreased too far, add an extension and set the entropyCnt
 		if s.entropyCnt == 0 {
 			extension = make([]byte, aes.BlockSize)
 			_, err := s.rng.Read(extension)
 			if err != nil {
-				s.mut.Unlock()
 				return 0, err
 			}
 			s.entropyCnt = s.streamGen.scalingFactor
@@ -149,24 +149,25 @@ func (s *Stream) Read(b []byte) (int, error) {
 
 	copy(s.source, dst)
 
-	s.mut.Unlock()
 	return len(b), nil
 }
 
 // The Fortuna construction is used to generate randomness
 func Fortuna(src, ext []byte, fortunaHash hash.Hash) cipher.Stream {
-	//Create a key based on the hash of the src and an extension
+	// Create a key based on the hash of the src and an extension
 	// extension used if entropyCnt had reached 0
 	fortunaHash.Reset()
 	fortunaHash.Write(src)
 	fortunaHash.Write(ext)
 	key := fortunaHash.Sum(nil)
-	//Initialize a block cipher on that key
+
+	// Initialize a block cipher on that key
 	block, err := aes.NewCipher(key[:aes.BlockSize])
 	if err != nil {
 		jww.FATAL.Panicf(err.Error())
 	}
-	//Encrypt the counter and place into destination
+
+	// Encrypt the counter and place into destination
 	return cipher.NewCTR(block, key[aes.BlockSize:2*aes.BlockSize])
 }
 
@@ -174,6 +175,5 @@ func Fortuna(src, ext []byte, fortunaHash hash.Hash) cipher.Stream {
 // csprng.Source interface.
 func (s *Stream) SetSeed(seed []byte) error {
 	jww.INFO.Printf("Stream does not utilise SetSeed().")
-
 	return nil
 }
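
Note on the locking change: the block.go and stream.go hunks replace explicit Unlock calls with a deferred unlock so the mutex is released on every return path, including the early error returns in Stream.Read. A minimal standalone sketch of that pattern, with a toy type whose names are hypothetical and not from this repository:

package main

import (
	"fmt"
	"sync"
)

// guarded is a stand-in for Block: a mutex plus data that getters must copy
// before returning.
type guarded struct {
	mutex sync.Mutex
	items []int
}

// Get locks, defers the unlock, and returns a copy. The defer runs on every
// return path (and on panic), so no branch can leak the lock.
func (g *guarded) Get() ([]int, error) {
	g.mutex.Lock()
	defer g.mutex.Unlock()

	if g.items == nil {
		// Early return no longer needs its own Unlock call.
		return nil, fmt.Errorf("nothing stored")
	}

	cpy := make([]int, len(g.items))
	copy(cpy, g.items)
	return cpy, nil
}

func main() {
	g := &guarded{items: []int{1, 2, 3}}
	out, _ := g.Get()
	fmt.Println(out)
}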
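
For context on the Fortuna hunk: the construction hashes the stream's source (plus an optional extension) into a key, then runs AES in CTR mode to produce the keystream. Below is a self-contained sketch using only the standard library; SHA-256 is picked here for concreteness, since the package takes the hash as a parameter, and the helper name is hypothetical rather than the repository's API.

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// fortunaLikeStream mirrors the shape of Fortuna in fastRNG/stream.go:
// key = H(src || ext), AES keyed with the first block of the digest,
// CTR IV taken from the second block.
func fortunaLikeStream(src, ext []byte) cipher.Stream {
	h := sha256.New()
	h.Write(src)
	h.Write(ext)
	key := h.Sum(nil) // 32 bytes: 16 for the AES key, 16 for the CTR IV

	block, err := aes.NewCipher(key[:aes.BlockSize])
	if err != nil {
		panic(err) // the real code panics through jww.FATAL
	}
	return cipher.NewCTR(block, key[aes.BlockSize:2*aes.BlockSize])
}

func main() {
	out := make([]byte, 2*aes.BlockSize)
	zeros := make([]byte, 2*aes.BlockSize) // XOR with zeros exposes the raw keystream
	fortunaLikeStream([]byte("seed material"), nil).XORKeyStream(out, zeros)
	fmt.Println(hex.EncodeToString(out))
}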