[client.keyset] rename 'Hash()' into 'CurrentHash()'
[client:client.internal] rename 'update()' into 'updateConfig()'

xdrm-brackets 2018-04-22 20:54:16 +02:00
parent 87a884abdc
commit 18dc9c786a
4 changed files with 279 additions and 233 deletions
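
For orientation, a minimal sketch of what the keyset rename changes at a call site, using only signatures that appear in the diffs below (context.Create, keyset.Create, the renamed CurrentHash). The program itself is hypothetical, and the client-internal updateConfig rename is not shown because that method is unexported.

package main

import (
    "fmt"

    "git.xdrm.io/schastsp/client/keyset"
    "git.xdrm.io/schastsp/context"
)

func main() {
    // signatures as they appear in this commit; error handling kept short
    ctx, err := context.Create(2.5)
    if err != nil {
        panic(err)
    }
    ks, err := keyset.Create(ctx)
    if err != nil {
        panic(err)
    }

    // before this commit: h, err := ks.Hash()
    h, err := ks.CurrentHash()
    if err != nil {
        panic(err)
    }
    fmt.Printf("current keyset hash: %x\n", h)
}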

View File

@@ -1,29 +1,27 @@
-package client;
+package client
import (
-   "fmt"
    "errors"
+   "fmt"
+   "git.xdrm.io/schastsp/client/keyset"
+   "git.xdrm.io/schastsp/context"
    "git.xdrm.io/schastsp/lib/scha"
    "io"
-   "git.xdrm.io/schastsp/context"
-   "git.xdrm.io/schastsp/client/keyset"
)
-const DEBUG = false
+const DEBUG = true
/* (1) Structure
---------------------------------------------------------*/
type T struct {
    ctx *context.T // shared context
-   key *keyset.T; // current key
-   sync *keyset.T; // next bufferised key
-   consumed bool; // true if 'key' must be renewed (with 'sync')
+   key *keyset.T // current key
+   sync *keyset.T // next bufferised key
+   consumed bool // true if 'key' must be renewed (with 'sync')
    fkey *config // key file management
    fsync *config // sync file management
}
/* (2) Constructor
 *
 * @ctx<context.T> Shared context
@@ -32,29 +30,31 @@ type T struct {
---------------------------------------------------------*/
func New(ctx *context.T, saveDir string) (*T, error) {
-   var err error;
-   inst := new(T);
+   var err error
+   inst := new(T)
    /* (1) Store context */
-   inst.ctx = ctx;
+   inst.ctx = ctx
    /* (2) Get file management for KEY */
-   inst.fkey, err = Config(saveDir, "key");
-   if err != nil { return nil, err }
-   /* (3) Get file management for SYNC */
-   inst.fsync, err = Config(saveDir, "sync");
-   if err != nil { return nil, err }
-   /* (4) Restore from config */
-   inst.update();
-   return inst, nil;
+   inst.fkey, err = Config(saveDir, "key")
+   if err != nil {
+       return nil, err
+   }
+   /* (3) Get file management for SYNC */
+   inst.fsync, err = Config(saveDir, "sync")
+   if err != nil {
+       return nil, err
+   }
+   /* (4) Restore from config */
+   inst.updateConfig()
+   return inst, nil
}
/* (3) Processes and sends a new request
 *
@@ -66,20 +66,25 @@ func New(ctx *context.T, saveDir string) (*T, error) {
func (c *T) Send(w io.Writer) error {
    /* (1) Generate the request */
-   var x1, x2 []byte;
-   err := c.generateRequest(x1, x2);
-   if err != nil { return err }
+   var x1, x2 []byte
+   err := c.generateRequest(x1, x2)
+   if err != nil {
+       return err
+   }
    /* (2) Write request into writer */
    _, err = w.Write(x1)
-   if err != nil { return err }
+   if err != nil {
+       return err
+   }
    _, err = w.Write(x2)
-   if err != nil { return err }
-   return nil;
+   if err != nil {
+       return err
+   }
+   return nil
}
/* (4) Receives and processes a response
 *
@@ -94,9 +99,12 @@ func (c *T) Receive(r io.Reader) error {
    ---------------------------------------------------------*/
    errCode := make([]byte, 1)
    read, err := r.Read(errCode)
-   if err != nil { return err }
-   if uint16(read) != 1 { return errors.New("Cannot read enough bytes") }
+   if err != nil {
+       return err
+   }
+   if uint16(read) != 1 {
+       return errors.New("Cannot read enough bytes")
+   }
    /* (2) Manage success
    ---------------------------------------------------------*/
@@ -105,31 +113,40 @@ func (c *T) Receive(r io.Reader) error {
        /* (1) If pending migration -> migrate */
        if c.key.MigrationCode() == 2 {
            c.key.MigrationCode(3)
-           if DEBUG { fmt.Printf("*** VALIDATED MIGRATION\n") }
+           if DEBUG {
+               fmt.Printf("*** VALIDATED MIGRATION\n")
+           }
        }
        /* (2) No error anyway */
-       return nil;
+       return nil
    }
    /* (3) Manage rescue
    ---------------------------------------------------------*/
    /* (1) Read y1 */
-   y1 := make([]byte, scha.HSIZE);
+   y1 := make([]byte, scha.HSIZE)
    read, err = r.Read(y1)
-   if err != nil { return err }
-   if uint16(read) != scha.HSIZE { return errors.New("Cannot read enough bytes") }
+   if err != nil {
+       return err
+   }
+   if uint16(read) != scha.HSIZE {
+       return errors.New("Cannot read enough bytes")
+   }
    /* (2) Read y2 */
-   y2 := make([]byte, scha.HSIZE);
+   y2 := make([]byte, scha.HSIZE)
    read, err = r.Read(y2)
-   if err != nil { return err }
-   if uint16(read) != scha.HSIZE { return errors.New("Cannot read enough bytes") }
+   if err != nil {
+       return err
+   }
+   if uint16(read) != scha.HSIZE {
+       return errors.New("Cannot read enough bytes")
+   }
    /* (3) Manage rescue mode */
-   err = c.rescue(y1, y2);
+   err = c.rescue(y1, y2)
    /* (4) Dispatch err */
    return err
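
As a reading aid, a short sketch of how this client type is driven end to end, based only on the signatures visible above (New, Send, Receive). The save directory and the buffer standing in for the transport are made up for the example.

package main

import (
    "bytes"
    "log"

    "git.xdrm.io/schastsp/client"
    "git.xdrm.io/schastsp/context"
)

func main() {
    ctx, err := context.Create(2.5)
    if err != nil {
        log.Fatal(err)
    }

    // "./keys" is an arbitrary directory for the 'key' and 'sync' files
    c, err := client.New(ctx, "./keys")
    if err != nil {
        log.Fatal(err)
    }

    // a buffer stands in for the network; Send writes x1 then x2 into it
    var request bytes.Buffer
    if err := c.Send(&request); err != nil {
        log.Fatal(err)
    }

    // the server response (error code, then y1/y2 in rescue mode) would be
    // fed back through Receive on a real connection
}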

View File

@@ -1,109 +1,111 @@
-package client;
+package client
import (
-   "fmt"
    "encoding/binary"
-   "git.xdrm.io/schastsp/lib/scha"
-   "git.xdrm.io/schastsp/lib/xor"
-   "git.xdrm.io/schastsp/lib/timeid"
+   "fmt"
    "git.xdrm.io/schastsp/client/keyset"
+   "git.xdrm.io/schastsp/lib/scha"
+   "git.xdrm.io/schastsp/lib/timeid"
+   "git.xdrm.io/schastsp/lib/xor"
)
/* (1) Updates 'key' and 'sync' with files
 *
---------------------------------------------------------*/
-func (c *T) update(){
+func (c *T) updateConfig() {
-   var err error;
+   var err error
    /* (1) Restore if both are NIL
    ---------------------------------------------------------*/
    if c.key == nil && c.sync == nil {
        /* (1) Create default key */
-       c.key, err = keyset.Create(c.ctx);
+       c.key, err = keyset.Create(c.ctx)
        /* (2) Fetch key */
        err = c.fkey.Fetch(c.key)
        /* (3) On error -> set key to NIL */
-       if err != nil { c.key = nil }
+       if err != nil {
+           c.key = nil
+       }
        /* (4) Create default sync */
-       c.sync, err = keyset.Create(c.ctx);
+       c.sync, err = keyset.Create(c.ctx)
        /* (5) Fetch sync */
        err = c.fsync.Fetch(c.sync)
        /* (6) On error -> set sync to NIL */
-       if err != nil { c.sync = nil }
+       if err != nil {
+           c.sync = nil
+       }
        /* (7) Exit if all keysets have been fetched */
        if c.key != nil && c.sync != nil {
-           return;
+           return
        }
    }
    /* (2) If cannot fetch -> create new keysets
    ---------------------------------------------------------*/
    if c.key == nil {
-       c.key, _ = keyset.Create(c.ctx);
+       c.key, _ = keyset.Create(c.ctx)
    }
    if c.sync == nil {
-       c.sync, _ = keyset.Create(c.ctx);
+       c.sync, _ = keyset.Create(c.ctx)
    }
    /* (3) Store current value
    ---------------------------------------------------------*/
    /* (1) Store key */
    err = c.fkey.Store(c.key)
-   if err != nil { panic("Cannot store key") }
+   if err != nil {
+       panic("Cannot store key")
+   }
    /* (2) Store sync */
    err = c.fsync.Store(c.sync)
-   if err != nil { panic("Cannot store sync") }
+   if err != nil {
+       panic("Cannot store sync")
+   }
}
/* (2) Migrate current key
 *
---------------------------------------------------------*/
func (c *T) migrateKey() {
-   var err error;
+   var err error
    /* (1) Copy sync into key */
    c.key = c.sync
    /* (2) Regenerate sync */
-   c.sync, err = keyset.Create(c.ctx);
+   c.sync, err = keyset.Create(c.ctx)
-   if err != nil { panic(err) }
+   if err != nil {
+       panic(err)
+   }
    /* (3) Store keysets to files */
-   c.update();
+   c.updateConfig()
}
-/* (3) Generate a new key respecting mod constraints (timeid + migration)
- *
- * @return newKey<*keyset.T> New key if found
- *                           NIL on error
+/* (3) Generate a new 'sync' keyset respecting mod constraints (timeid + migration)
 *
---------------------------------------------------------*/
func (c *T) generateKeyWithConstraints() {
    /* Get current hash */
-   keyHash, err := c.key.Hash();
-   if err != nil { panic(err) }
+   keyHash, err := c.key.CurrentHash()
+   if err != nil {
+       panic(err)
+   }
    /* Search key one is respects contraints */
    for true {
@@ -112,54 +114,61 @@ func (c *T) generateKeyWithConstraints(){
        ---------------------------------------------------------*/
        _, timeMod := timeid.Generate(c.ctx.Window())
        /* (2) Generate a new sync
        ---------------------------------------------------------*/
-       newKey, _ := keyset.Create(c.ctx);
+       newKey, _ := keyset.Create(c.ctx)
        /* (3) Check constraints
        ---------------------------------------------------------*/
        /* (1) Get next hash */
-       syncHash, err := newKey.Hash()
-       if err != nil { continue }
+       syncHash, err := newKey.CurrentHash()
+       if err != nil {
+           continue
+       }
-       if DEBUG { fmt.Printf("+ hash is '%x'\n", keyHash); }
-       if DEBUG { fmt.Printf("+ next hash is '%x'\n", syncHash); }
+       if DEBUG {
+           fmt.Printf("+ hash is '%x'\n", keyHash)
+       }
+       if DEBUG {
+           fmt.Printf("+ next hash is '%x'\n", syncHash)
+       }
        /* (2) Get time mod difference (first byte) */
-       timeConstraintValue := xor.Byte(keyHash[0], syncHash[0]);
-       if DEBUG { fmt.Printf(" %.2x ^ %.2x = %.2x[%d] %% 2 = %d == %d ? %t\n", keyHash[0], syncHash[0], timeConstraintValue, timeConstraintValue, uint32(timeConstraintValue) % 2, timeMod, uint32(timeConstraintValue) % 2 == timeMod ) }
+       timeConstraintValue := xor.Byte(keyHash[0], syncHash[0])
+       if DEBUG {
+           fmt.Printf(" %.2x ^ %.2x = %.2x[%d] %% 2 = %d == %d ? %t\n", keyHash[0], syncHash[0], timeConstraintValue, timeConstraintValue, uint32(timeConstraintValue)%2, timeMod, uint32(timeConstraintValue)%2 == timeMod)
+       }
        /* (4) Retry if invalid time constraint */
        if uint32(timeConstraintValue)%2 != timeMod {
-           continue;
+           continue
        }
        /* (5) Get migration mod difference (second byte) */
-       migrationConstraintValue := xor.Byte(keyHash[1], syncHash[2]);
-       if DEBUG { fmt.Printf(" %.2x ^ %.2x = %.2x[%d] %% 3 = %d == %d ? %t\n", keyHash[1], syncHash[1], migrationConstraintValue, migrationConstraintValue, uint8(migrationConstraintValue) % 3, c.key.MigrationCode(), uint8(migrationConstraintValue) % 3 == c.key.MigrationCode() ) }
+       migrationConstraintValue := xor.Byte(keyHash[1], syncHash[2])
+       if DEBUG {
+           fmt.Printf(" %.2x ^ %.2x = %.2x[%d] %% 3 = %d == %d ? %t\n", keyHash[1], syncHash[1], migrationConstraintValue, migrationConstraintValue, uint8(migrationConstraintValue)%3, c.key.MigrationCode(), uint8(migrationConstraintValue)%3 == c.key.MigrationCode())
+       }
        /* (6) Retry if invalid time constraint */
        if uint8(migrationConstraintValue)%3 != c.key.MigrationCode() {
-           continue;
+           continue
        }
        /* (7) Store new sync */
-       c.sync = newKey;
+       c.sync = newKey
        /* (8) Store keysets to files */
-       c.update()
+       c.updateConfig()
-       break;
+       break
    }
}
/* (4) Generate the client request
 *
 * @x1<[]byte> Byte array to write into
@@ -170,18 +179,17 @@ func (c *T) generateKeyWithConstraints(){
---------------------------------------------------------*/
func (c *T) generateRequest(x1 []byte, x2 []byte) error {
    /* (1) Migrate if validated migration
    ---------------------------------------------------------*/
    if c.key.MigrationCode() == 3 {
-       c.migrateKey();
+       c.migrateKey()
    }
    /* (2) Decrement and get useful hashes
    ---------------------------------------------------------*/
    /* (1) Decrement hash */
    remainingHashes := c.key.Decrement()
-   c.update()
+   c.updateConfig()
    if DEBUG {
        fmt.Printf("Remaining %x[%d] hashes\n", remainingHashes, remainingHashes)
@@ -189,29 +197,31 @@ func (c *T) generateRequest(x1 []byte, x2 []byte) error {
    }
    /* (2) Store current hash */
-   h0, err := c.key.Hash();
-   if err != nil { return err }
+   h0, err := c.key.CurrentHash()
+   if err != nil {
+       return err
+   }
    /* (3) Copy into next hash (same value) */
-   h1, err := c.key.Hash();
-   if err != nil { return err }
+   h1, err := c.key.CurrentHash()
+   if err != nil {
+       return err
+   }
    /* (3) New sync hash if key consumed
    ---------------------------------------------------------*/
    if c.key.MigrationCode() > 0 {
        /* (1) Generate sync with constraints */
-       c.generateKeyWithConstraints();
+       c.generateKeyWithConstraints()
        /* (2) Notify key need for renewal */
-       c.key.MigrationCode(2);
+       c.key.MigrationCode(2)
        /* (3) Store config */
-       c.update();
+       c.updateConfig()
    }
    /* (4) Manage time id
    ---------------------------------------------------------*/
    /* (1) Get current time id */
@@ -224,52 +234,46 @@ func (c *T) generateRequest(x1 []byte, x2 []byte) error {
    /* (2) Get digest of time id */
    hashedTimeId, err := scha.Hash(timeIdBytes, 1, nil, nil)
    /* (5) Calculate x1 and x2
    ---------------------------------------------------------*/
    /* (1) Calculate x1 = h ^ h(timeId) */
    x1 = xor.ByteArray(h0, hashedTimeId)
    if DEBUG {
-       fmt.Printf("\n=== x1 ===\n");
+       fmt.Printf("\n=== x1 ===\n")
        fmt.Printf(" hash is h0 = %x\n", h0)
        fmt.Printf(" time id is n = %x[%d]\n", timeIdBytes, timeId)
        fmt.Printf(" h(t) = %x\n", hashedTimeId)
-       fmt.Printf(" ---\n");
+       fmt.Printf(" ---\n")
        fmt.Printf(" x1 is h0+h(t) = %x\n", x1)
-       fmt.Printf(" ---\n");
+       fmt.Printf(" ---\n")
        fmt.Printf(" check x1+h(t) eq h0 = %x\n", xor.ByteArray(x1, hashedTimeId))
-       fmt.Printf(" check x1+h0 eq h(t) = %x\n", xor.ByteArray(x1, h0));
+       fmt.Printf(" check x1+h0 eq h(t) = %x\n", xor.ByteArray(x1, h0))
    }
    /* (2) Calculate x2 = h ^ h(timeId) ^ timeMod */
    x2 = xor.ByteArray(h1, hashedTimeId)
-   // do not add time mod if not code = 0
-   if c.key.MigrationCode() != 0 {
-       return nil;
+   // only add time mod if code = 0
+   if c.key.MigrationCode() == 0 {
+       x2[0] = xor.Byte(x2[0], byte(timeMod))
    }
-   x2[0] = xor.Byte(x2[0], byte(timeMod))
    if DEBUG {
-       fmt.Printf("\n=== x2 ===\n");
+       fmt.Printf("\n=== x2 ===\n")
        fmt.Printf(" next is h1 = %x\n", h1)
        fmt.Printf(" time mod is m = %x[%d]\n", timeMod, timeMod)
        fmt.Printf(" h(t) = %x\n", hashedTimeId)
-       fmt.Printf(" ---\n");
+       fmt.Printf(" ---\n")
        fmt.Printf(" x2 is h1+h(t)+m = %x\n", x2)
-       fmt.Printf(" ---\n");
+       fmt.Printf(" ---\n")
        fmt.Printf(" check x2+x1 %% 2 eq m = %d (%t)\n", uint8(xor.ByteArray(x1, x2)[0]%2), xor.ByteArray(x1, x2)[0]%2 == byte(timeMod))
-       fmt.Printf(" check x2+x1 %% 3 eq o = %d (%t)\n", uint8(xor.ByteArray(x1,x2)[1] % 3), uint8(xor.ByteArray(x1,x2)[1] % 3) == c.key.MigrationCode());
+       fmt.Printf(" check x2+x1 %% 3 eq o = %d (%t)\n", uint8(xor.ByteArray(x1, x2)[1]%3), uint8(xor.ByteArray(x1, x2)[1]%3) == c.key.MigrationCode())
    }
-   return nil;
+   return nil
}
/* (5) Rescue management
 *
 * @Y1<[]byte> First rescue parameter
@@ -284,16 +288,18 @@ func (c *T) rescue(y1 []byte, y2 []byte) error {
    timeMod := uint32(xor.ByteArray(y1, y2)[0] % 2)
    /* (2) Try to guess time id from timeM */
-   timeId := timeid.Guess(c.ctx.Window(), timeMod);
-   var timeIdBytes []byte;
+   timeId := timeid.Guess(c.ctx.Window(), timeMod)
+   var timeIdBytes []byte
    binary.BigEndian.PutUint32(timeIdBytes, timeId)
    /* (3) Hash timeId */
-   hashedTimeId, err := scha.Hash(timeIdBytes, 1, nil, nil);
-   if err != nil { return err }
+   hashedTimeId, err := scha.Hash(timeIdBytes, 1, nil, nil)
+   if err != nil {
+       return err
+   }
    /* (4) Get the received hash */
-   receivedHash := xor.ByteArray(y1, hashedTimeId);
+   receivedHash := xor.ByteArray(y1, hashedTimeId)
    /* (4) Try to rescue the key */
    err = c.key.Rescue(receivedHash)
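
To make the retry loop in generateKeyWithConstraints easier to follow, here is its acceptance test rewritten as a standalone predicate using plain byte XOR. This is an illustration with the standard library only, not the project's xor package, and the keyHash[1] ^ syncHash[2] index pair is copied as-is from the hunk above.

package main

import "fmt"

// satisfiesConstraints mirrors the two checks above: a candidate sync hash is
// accepted only if the XOR of the leading bytes encodes the current time mod
// (mod 2) and the key's migration code (mod 3).
func satisfiesConstraints(keyHash, syncHash []byte, timeMod uint32, migrationCode uint8) bool {
    timeConstraint := keyHash[0] ^ syncHash[0]
    if uint32(timeConstraint)%2 != timeMod {
        return false
    }
    migrationConstraint := keyHash[1] ^ syncHash[2] // index 2, as in the code above
    return migrationConstraint%3 == migrationCode
}

func main() {
    keyHash := []byte{0x3a, 0x71, 0x55}
    syncHash := []byte{0x3b, 0x70, 0x70}
    fmt.Println(satisfiesConstraints(keyHash, syncHash, 1, 1)) // true
}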

View File

@@ -1,14 +1,14 @@
package keyset
import (
-   "git.xdrm.io/schastsp/context"
-   "io"
    "encoding/binary"
    "errors"
+   "git.xdrm.io/schastsp/context"
    "git.xdrm.io/schastsp/lib/scha"
+   "io"
)
-const SecretSize = scha.HSIZE * 4;
+const SecretSize = scha.HSIZE * 4
/* Attributes */
type T struct {
@@ -23,8 +23,6 @@ type T struct {
    // 3: validated migration
}
/* (1) Creates a new KeySet
 *
 * @ctx<Context> Context constants
@@ -51,15 +49,12 @@ func Create(ctx *context.T) (*T, error) {
    return instance, nil
}
/* (2) Get current hash
 *
 * @return digest<[]byte]> Current hash representing the set
 *
---------------------------------------------------------*/
-func (s T) Hash() ([]byte, error) {
+func (s T) CurrentHash() ([]byte, error) {
    /* (1) Get digest */
    digest, err := scha.Hash(s.sec, uint(s.depth), nil, nil)
@@ -74,8 +69,6 @@ func (s T) Hash() ([]byte, error) {
}
/* (3) Decrement depth
 *
 * @return remaining<uint> Remaining hashes before migration
@@ -96,8 +89,6 @@ func (s *T) Decrement() uint16 {
}
/* (4) Serialisation
 *
 * @return serial<string> String representation
@@ -116,30 +107,36 @@ func (s *T) Decrement() uint16 {
---------------------------------------------------------*/
func (s *T) Store(writer io.Writer) error {
-   var err error;
+   var err error
    /* (1) Copy secret size */
    err = binary.Write(writer, binary.BigEndian, uint16(len(s.sec)))
-   if err != nil { return err }
+   if err != nil {
+       return err
+   }
    /* (2) Copy secret */
    err = binary.Write(writer, binary.BigEndian, s.sec)
-   if err != nil { return err }
+   if err != nil {
+       return err
+   }
    /* (3) Copy depth */
    err = binary.Write(writer, binary.BigEndian, s.depth)
-   if err != nil { return err }
+   if err != nil {
+       return err
+   }
    /* (4) Copy migration code */
    err = binary.Write(writer, binary.BigEndian, s.mcode)
-   if err != nil { return err }
+   if err != nil {
+       return err
+   }
    return nil
}
/* (5) Builds a KeySet from its serial representation
 *
 * @serial<string> String representation
@@ -155,7 +152,9 @@ func (s *T) Fetch(reader io.Reader) error {
    /* (1) Read the secret size */
    err = binary.Read(reader, binary.BigEndian, &secretLength)
-   if err != nil { return err }
+   if err != nil {
+       return err
+   }
    /* (2) Fail if secretLength lower than digest size */
    if secretLength < scha.HSIZE {
@@ -163,9 +162,11 @@ func (s *T) Fetch(reader io.Reader) error {
    }
    /* (3) Try to copy the secret */
-   s.sec = make([]byte, secretLength);
+   s.sec = make([]byte, secretLength)
    err = binary.Read(reader, binary.BigEndian, &s.sec)
-   if err != nil { return err }
+   if err != nil {
+       return err
+   }
    /* (4) Manage invalid secret size (mismatch secretLength) */
    if uint16(len(s.sec)) != secretLength {
@@ -174,17 +175,20 @@ func (s *T) Fetch(reader io.Reader) error {
    /* (5) Try to copy the depth */
    err = binary.Read(reader, binary.BigEndian, &s.depth)
-   if err != nil { return err }
+   if err != nil {
+       return err
+   }
    /* (6) Try to copy the migration code */
    err = binary.Read(reader, binary.BigEndian, &s.mcode)
-   if err != nil { return err }
+   if err != nil {
+       return err
+   }
    return nil
}
/* (6) Getter/Setter for migration code 'mcode'
 *
 * @mcode<uint8> [OPT] New value
@@ -207,8 +211,6 @@ func (s T) MigrationCode(optional... uint8) uint8 {
}
/* (7) Updates depth for rescuing from desynchroisation
 *
 * @lastHash<[]byte> Last received hash
@@ -228,11 +230,15 @@ func (s *T) Rescue(lastHash []byte) error {
    for i := s.depth; i <= s.depth+s.ctx.MinDepth(); i++ {
        /* (1) Process hash */
-       currentHash, err := s.Hash();
-       if err != nil { return err }
+       currentHash, err := s.CurrentHash()
+       if err != nil {
+           return err
+       }
        /* (2) If not found -> try again */
-       if string(currentHash) != string(lastHash) { continue }
+       if string(currentHash) != string(lastHash) {
+           continue
+       }
        /* (3) Store new depth */
        s.depth = i - s.ctx.MinDepth() + 1
@@ -244,7 +250,6 @@ func (s *T) Rescue(lastHash []byte) error {
    }
-   return errors.New("Cannot find an available rescue depth");
+   return errors.New("Cannot find an available rescue depth")
}
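
A note on the renamed CurrentHash: together with Decrement it behaves as a hash chain, assuming scha.Hash(s.sec, depth, nil, nil) applies the hash depth times, which is what the rescue loop above and the decrementing-property test below suggest. A self-contained sketch of that property, with sha256 standing in for scha:

package main

import (
    "bytes"
    "crypto/sha256"
    "fmt"
)

// hashN applies the hash n times in a chain, playing the role that
// scha.Hash(sec, depth, nil, nil) plays in CurrentHash (sha256 is only a stand-in).
func hashN(secret []byte, n uint) []byte {
    cur := secret
    for i := uint(0); i < n; i++ {
        d := sha256.Sum256(cur)
        cur = d[:]
    }
    return cur
}

func main() {
    secret := []byte("example secret")
    depth := uint(10)

    h1 := hashN(secret, depth)   // CurrentHash() before Decrement()
    h2 := hashN(secret, depth-1) // CurrentHash() after one Decrement()

    // decrementing property: hashing the new value once gives back the old one
    check := sha256.Sum256(h2)
    fmt.Println(bytes.Equal(check[:], h1)) // true
}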

View File

@@ -6,15 +6,19 @@ import (
    "git.xdrm.io/schastsp/lib/scha"
    "testing"
)
func TestGenerationDepthBoundaries(t *testing.T) {
    var min, max uint16 = 0x0f0, 0xfff
    var rangeMin = min + (max-min)/2
    var rangeMax = max
-   var created *T;
-   ctx, err := context.Create(2.5, min, 0, max);
-   if err != nil { t.Errorf("Do not expected an error: %s", err); return }
+   var created *T
+   ctx, err := context.Create(2.5, min, 0, max)
+   if err != nil {
+       t.Errorf("Do not expected an error: %s", err)
+       return
+   }
    created, err = Create(ctx)
@@ -29,8 +33,11 @@ func TestGenerationDepthBoundaries(t *testing.T) {
}
func TestSchaDecrementingProperty(t *testing.T) {
-   ctx, err := context.Create(2.5);
-   if err != nil { t.Errorf("Do not expected an error"); return }
+   ctx, err := context.Create(2.5)
+   if err != nil {
+       t.Errorf("Do not expected an error")
+       return
+   }
    var h1, h2, hcheck []byte
    var created *T
@@ -42,7 +49,7 @@ func TestSchaDecrementingProperty(t *testing.T) {
    }
    /* (1) Get current hash */
-   h1, err = created.Hash()
+   h1, err = created.CurrentHash()
    if err != nil {
        t.Errorf("Do not expected an error, got: %s", err)
    }
@@ -51,7 +58,7 @@ func TestSchaDecrementingProperty(t *testing.T) {
    created.Decrement()
    /* (3) Get new hash */
-   h2, err = created.Hash()
+   h2, err = created.CurrentHash()
    if err != nil {
        t.Errorf("Do not expected an error, got: %s", err)
    }
@@ -75,8 +82,11 @@ func TestSchaDecrementingProperty(t *testing.T) {
}
func TestDecrementMinimum(t *testing.T) {
-   ctx, err := context.Create(2.5);
-   if err != nil { t.Errorf("Do not expected an error"); return }
+   ctx, err := context.Create(2.5)
+   if err != nil {
+       t.Errorf("Do not expected an error")
+       return
+   }
    var h1, h2, hcheck []byte
    var created *T
@@ -88,7 +98,7 @@ func TestDecrementMinimum(t *testing.T) {
    }
    /* (1) Get current hash */
-   h1, err = created.Hash()
+   h1, err = created.CurrentHash()
    if err != nil {
        t.Errorf("Do not expected an error, got: %s", err)
    }
@@ -97,7 +107,7 @@ func TestDecrementMinimum(t *testing.T) {
    created.Decrement()
    /* (3) Get new hash */
-   h2, err = created.Hash()
+   h2, err = created.CurrentHash()
    if err != nil {
        t.Errorf("Do not expected an error, got: %s", err)
    }
@@ -120,49 +130,57 @@ func TestDecrementMinimum(t *testing.T) {
}
func TestRestore(t *testing.T) {
-   var buffer, srcData bytes.Buffer;
-   var src, dst *T;
-   var ctx *context.T;
-   var err error;
+   var buffer, srcData bytes.Buffer
+   var src, dst *T
+   var ctx *context.T
+   var err error
    /* (1) Create a context */
-   ctx, err = context.Create(2.2);
-   if err != nil { t.Fatalf("[Unexpected error] %s", err); }
+   ctx, err = context.Create(2.2)
+   if err != nil {
+       t.Fatalf("[Unexpected error] %s", err)
+   }
    /* (2) Create our source */
-   src, err = Create(ctx);
-   if err != nil { t.Fatalf("[Unexpected error] %s", err); }
+   src, err = Create(ctx)
+   if err != nil {
+       t.Fatalf("[Unexpected error] %s", err)
+   }
    /* (3) Store it to buffer (+to another buffer to check after) */
-   buffer.Reset(); srcData.Reset()
-   err = src.Store(&buffer);
-   if err != nil { t.Fatalf("[Unexpected error] %s", err); }
-   err = src.Store(&srcData);
-   if err != nil { t.Fatalf("[Unexpected error] %s", err); }
+   buffer.Reset()
+   srcData.Reset()
+   err = src.Store(&buffer)
+   if err != nil {
+       t.Fatalf("[Unexpected error] %s", err)
+   }
+   err = src.Store(&srcData)
+   if err != nil {
+       t.Fatalf("[Unexpected error] %s", err)
+   }
    /* (4) Create our target */
-   dst, err = Create(ctx);
-   if err != nil { t.Fatalf("[Unexpected error] %s", err); }
+   dst, err = Create(ctx)
+   if err != nil {
+       t.Fatalf("[Unexpected error] %s", err)
+   }
    /* (5) Restore from buffer */
-   err = dst.Fetch(&buffer);
-   if err != nil { t.Fatalf("[Unexpected error] %s", err); }
+   err = dst.Fetch(&buffer)
+   if err != nil {
+       t.Fatalf("[Unexpected error] %s", err)
+   }
    /* (6) Get both data */
    dst.Store(&buffer) // dst data is in buffer
-   if err != nil { t.Fatalf("[Unexpected error] %s", err); }
+   if err != nil {
+       t.Fatalf("[Unexpected error] %s", err)
+   }
    if len(buffer.Bytes()) != len(srcData.Bytes()) || buffer.String() != srcData.String() {
-       t.Fatalf("Expected restored keyset to be equal to original ;\n - expected %x\n - got %x", srcData.String(), buffer.String());
+       t.Fatalf("Expected restored keyset to be equal to original ;\n - expected %x\n - got %x", srcData.String(), buffer.String())
    }
}