[client.keyset] rename 'Hash()' to 'CurrentHash()' [client:client.internal] rename 'update()' to 'updateConfig()'
parent 87a884abdc
commit 18dc9c786a
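For context, the rename is purely mechanical at the call sites; a minimal before/after sketch (assuming a keyset value `k` and a client value `c` as defined in the diff below):

    // before this commit
    digest, err := k.Hash()   // keyset.T: digest at the current depth
    c.update()                // client.T: persist 'key'/'sync' to their config files

    // after this commit
    digest, err := k.CurrentHash()
    c.updateConfig()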
@@ -1,29 +1,27 @@
package client;
package client

import (
"fmt"
"errors"
"fmt"
"git.xdrm.io/schastsp/client/keyset"
"git.xdrm.io/schastsp/context"
"git.xdrm.io/schastsp/lib/scha"
"io"
"git.xdrm.io/schastsp/context"
"git.xdrm.io/schastsp/client/keyset"
)

const DEBUG = false
const DEBUG = true

/* (1) Structure
---------------------------------------------------------*/
type T struct {
ctx *context.T // shared context
key *keyset.T; // current key
sync *keyset.T; // next bufferised key
consumed bool; // true if 'key' must be renewed (with 'sync')
key *keyset.T // current key
sync *keyset.T // next bufferised key
consumed bool // true if 'key' must be renewed (with 'sync')
fkey *config // key file management
fsync *config // sync file management
}

/* (2) Constructor
*
* @ctx<context.T> Shared context
@@ -32,30 +30,32 @@ type T struct {
---------------------------------------------------------*/
func New(ctx *context.T, saveDir string) (*T, error) {

var err error;
var err error

inst := new(T);
inst := new(T)

/* (1) Store context */
inst.ctx = ctx;
inst.ctx = ctx

/* (2) Get file management for KEY */
inst.fkey, err = Config(saveDir, "key");
if err != nil { return nil, err }
inst.fkey, err = Config(saveDir, "key")
if err != nil {
return nil, err
}

/* (3) Get file management for SYNC */
inst.fsync, err = Config(saveDir, "sync");
if err != nil { return nil, err }
inst.fsync, err = Config(saveDir, "sync")
if err != nil {
return nil, err
}

/* (4) Restore from config */
inst.update();
inst.updateConfig()

return inst, nil;
return inst, nil

}

/* (3) Processes and sends a new request
*
* @w<io.Writer> Writer to send into
@@ -66,21 +66,26 @@ func New(ctx *context.T, saveDir string) (*T, error) {
func (c *T) Send(w io.Writer) error {

/* (1) Generate the request */
var x1, x2 []byte;
var x1, x2 []byte

err := c.generateRequest(x1, x2);
if err != nil { return err }
err := c.generateRequest(x1, x2)
if err != nil {
return err
}

/* (2) Write request into writer */
_, err = w.Write(x1)
if err != nil { return err }
if err != nil {
return err
}
_, err = w.Write(x2)
if err != nil { return err }
if err != nil {
return err
}

return nil;
return nil
}

/* (4) Receives and processes a response
*
* @w<io.Reader> Reader to receive from
@@ -94,9 +99,12 @@ func (c *T) Receive(r io.Reader) error {
---------------------------------------------------------*/
errCode := make([]byte, 1)
read, err := r.Read(errCode)
if err != nil { return err }
if uint16(read) != 1 { return errors.New("Cannot read enough bytes") }

if err != nil {
return err
}
if uint16(read) != 1 {
return errors.New("Cannot read enough bytes")
}

/* (2) Manage success
---------------------------------------------------------*/
@@ -105,31 +113,40 @@ func (c *T) Receive(r io.Reader) error {
/* (1) If pending migration -> migrate */
if c.key.MigrationCode() == 2 {
c.key.MigrationCode(3)
if DEBUG { fmt.Printf("*** VALIDATED MIGRATION\n") }
if DEBUG {
fmt.Printf("*** VALIDATED MIGRATION\n")
}
}

/* (2) No error anyway */
return nil;
return nil

}

/* (3) Manage rescue
---------------------------------------------------------*/
/* (1) Read y1 */
y1 := make([]byte, scha.HSIZE);
y1 := make([]byte, scha.HSIZE)
read, err = r.Read(y1)
if err != nil { return err }
if uint16(read) != scha.HSIZE { return errors.New("Cannot read enough bytes") }
if err != nil {
return err
}
if uint16(read) != scha.HSIZE {
return errors.New("Cannot read enough bytes")
}

/* (2) Read y2 */
y2 := make([]byte, scha.HSIZE);
y2 := make([]byte, scha.HSIZE)
read, err = r.Read(y2)
if err != nil { return err }
if uint16(read) != scha.HSIZE { return errors.New("Cannot read enough bytes") }
if err != nil {
return err
}
if uint16(read) != scha.HSIZE {
return errors.New("Cannot read enough bytes")
}

/* (3) Manage rescue mode */
err = c.rescue(y1, y2);
err = c.rescue(y1, y2)

/* (4) Dispatch err */
return err
@@ -1,109 +1,111 @@
package client;
package client

import (
"fmt"
"encoding/binary"
"git.xdrm.io/schastsp/lib/scha"
"git.xdrm.io/schastsp/lib/xor"
"git.xdrm.io/schastsp/lib/timeid"
"fmt"
"git.xdrm.io/schastsp/client/keyset"
"git.xdrm.io/schastsp/lib/scha"
"git.xdrm.io/schastsp/lib/timeid"
"git.xdrm.io/schastsp/lib/xor"
)

/* (1) Updates 'key' and 'sync' with files
*
---------------------------------------------------------*/
func (c *T) update(){
func (c *T) updateConfig() {

var err error;
var err error

/* (1) Restore if both are NIL
---------------------------------------------------------*/
if c.key == nil && c.sync == nil {

/* (1) Create default key */
c.key, err = keyset.Create(c.ctx);
c.key, err = keyset.Create(c.ctx)

/* (2) Fetch key */
err = c.fkey.Fetch(c.key)

/* (3) On error -> set key to NIL */
if err != nil { c.key = nil }
if err != nil {
c.key = nil
}

/* (4) Create default sync */
c.sync, err = keyset.Create(c.ctx);
c.sync, err = keyset.Create(c.ctx)

/* (5) Fetch sync */
err = c.fsync.Fetch(c.sync)

/* (6) On error -> set sync to NIL */
if err != nil { c.sync = nil }
if err != nil {
c.sync = nil
}

/* (7) Exit if all keysets have been fetched */
if c.key != nil && c.sync != nil {
return;
return
}

}

/* (2) If cannot fetch -> create new keysets
---------------------------------------------------------*/
if c.key == nil{
c.key, _ = keyset.Create(c.ctx);
if c.key == nil {
c.key, _ = keyset.Create(c.ctx)
}

if c.sync == nil{
c.sync, _ = keyset.Create(c.ctx);
if c.sync == nil {
c.sync, _ = keyset.Create(c.ctx)
}

/* (3) Store current value
---------------------------------------------------------*/
/* (1) Store key */
err = c.fkey.Store(c.key)
if err != nil { panic("Cannot store key") }
if err != nil {
panic("Cannot store key")
}

/* (2) Store sync */
err = c.fsync.Store(c.sync)
if err != nil { panic("Cannot store sync") }
if err != nil {
panic("Cannot store sync")
}

}

/* (2) Migrate current key
*
---------------------------------------------------------*/
func (c *T) migrateKey(){
func (c *T) migrateKey() {

var err error;
var err error

/* (1) Copy sync into key */
c.key = c.sync

/* (2) Regenerate sync */
c.sync, err = keyset.Create(c.ctx);
if err != nil { panic(err) }
c.sync, err = keyset.Create(c.ctx)
if err != nil {
panic(err)
}

/* (3) Store keysets to files */
c.update();
c.updateConfig()

}

/* (3) Generate a new key respecting mod constraints (timeid + migration)
*
* @return newKey<*keyset.T> New key if found
* NIL on error
/* (3) Generate a new 'sync' keyset respecting mod constraints (timeid + migration)
*
---------------------------------------------------------*/
func (c *T) generateKeyWithConstraints(){
func (c *T) generateKeyWithConstraints() {

/* Get current hash */
keyHash, err := c.key.Hash();
if err != nil { panic(err) }

keyHash, err := c.key.CurrentHash()
if err != nil {
panic(err)
}

/* Search key one is respects contraints */
for true {

@@ -112,54 +114,61 @@ func (c *T) generateKeyWithConstraints(){
---------------------------------------------------------*/
_, timeMod := timeid.Generate(c.ctx.Window())

/* (2) Generate a new sync
---------------------------------------------------------*/
newKey, _ := keyset.Create(c.ctx);

newKey, _ := keyset.Create(c.ctx)

/* (3) Check constraints
---------------------------------------------------------*/
/* (1) Get next hash */
syncHash, err := newKey.Hash()
if err != nil { continue }
syncHash, err := newKey.CurrentHash()
if err != nil {
continue
}

if DEBUG { fmt.Printf("+ hash is '%x'\n", keyHash); }
if DEBUG { fmt.Printf("+ next hash is '%x'\n", syncHash); }
if DEBUG {
fmt.Printf("+ hash is '%x'\n", keyHash)
}
if DEBUG {
fmt.Printf("+ next hash is '%x'\n", syncHash)
}

/* (2) Get time mod difference (first byte) */
timeConstraintValue := xor.Byte(keyHash[0], syncHash[0]);
timeConstraintValue := xor.Byte(keyHash[0], syncHash[0])

if DEBUG { fmt.Printf(" %.2x ^ %.2x = %.2x[%d] %% 2 = %d == %d ? %t\n", keyHash[0], syncHash[0], timeConstraintValue, timeConstraintValue, uint32(timeConstraintValue) % 2, timeMod, uint32(timeConstraintValue) % 2 == timeMod ) }
if DEBUG {
fmt.Printf(" %.2x ^ %.2x = %.2x[%d] %% 2 = %d == %d ? %t\n", keyHash[0], syncHash[0], timeConstraintValue, timeConstraintValue, uint32(timeConstraintValue)%2, timeMod, uint32(timeConstraintValue)%2 == timeMod)
}

/* (4) Retry if invalid time constraint */
if uint32(timeConstraintValue) % 2 != timeMod {
continue;
if uint32(timeConstraintValue)%2 != timeMod {
continue
}

/* (5) Get migration mod difference (second byte) */
migrationConstraintValue := xor.Byte(keyHash[1], syncHash[2]);
migrationConstraintValue := xor.Byte(keyHash[1], syncHash[2])

if DEBUG { fmt.Printf(" %.2x ^ %.2x = %.2x[%d] %% 3 = %d == %d ? %t\n", keyHash[1], syncHash[1], migrationConstraintValue, migrationConstraintValue, uint8(migrationConstraintValue) % 3, c.key.MigrationCode(), uint8(migrationConstraintValue) % 3 == c.key.MigrationCode() ) }
if DEBUG {
fmt.Printf(" %.2x ^ %.2x = %.2x[%d] %% 3 = %d == %d ? %t\n", keyHash[1], syncHash[1], migrationConstraintValue, migrationConstraintValue, uint8(migrationConstraintValue)%3, c.key.MigrationCode(), uint8(migrationConstraintValue)%3 == c.key.MigrationCode())
}

/* (6) Retry if invalid time constraint */
if uint8(migrationConstraintValue) % 3 != c.key.MigrationCode() {
continue;
if uint8(migrationConstraintValue)%3 != c.key.MigrationCode() {
continue
}

/* (7) Store new sync */
c.sync = newKey;
c.sync = newKey

/* (8) Store keysets to files */
c.update()
c.updateConfig()

break;
break

}

}

/* (4) Generate the client request
*
* @x1<[]byte> Byte array to write into
@@ -170,48 +179,49 @@ func (c *T) generateKeyWithConstraints(){
---------------------------------------------------------*/
func (c *T) generateRequest(x1 []byte, x2 []byte) error {

/* (1) Migrate if validated migration
---------------------------------------------------------*/
if c.key.MigrationCode() == 3 {
c.migrateKey();
c.migrateKey()
}

/* (2) Decrement and get useful hashes
---------------------------------------------------------*/
/* (1) Decrement hash */
remainingHashes := c.key.Decrement()
c.update()
c.updateConfig()

if DEBUG {
fmt.Printf("Remaining %x[%d] hashes\n", remainingHashes, remainingHashes)
fmt.Printf("Migration code is %d\n", c.key.MigrationCode())
}
if DEBUG {
fmt.Printf("Remaining %x[%d] hashes\n", remainingHashes, remainingHashes)
fmt.Printf("Migration code is %d\n", c.key.MigrationCode())
}

/* (2) Store current hash */
h0, err := c.key.Hash();
if err != nil { return err }
h0, err := c.key.CurrentHash()
if err != nil {
return err
}

/* (3) Copy into next hash (same value) */
h1, err := c.key.Hash();
if err != nil { return err }

h1, err := c.key.CurrentHash()
if err != nil {
return err
}

/* (3) New sync hash if key consumed
---------------------------------------------------------*/
if c.key.MigrationCode() > 0 {

/* (1) Generate sync with constraints */
c.generateKeyWithConstraints();
c.generateKeyWithConstraints()

/* (2) Notify key need for renewal */
c.key.MigrationCode(2);
c.key.MigrationCode(2)

/* (3) Store config */
c.update();
c.updateConfig()
}

/* (4) Manage time id
---------------------------------------------------------*/
/* (1) Get current time id */

@@ -224,52 +234,46 @@ func (c *T) generateRequest(x1 []byte, x2 []byte) error {
/* (2) Get digest of time id */
hashedTimeId, err := scha.Hash(timeIdBytes, 1, nil, nil)

/* (5) Calculate x1 and x2
---------------------------------------------------------*/
/* (1) Calculate x1 = h ^ h(timeId) */
x1 = xor.ByteArray(h0, hashedTimeId)

if DEBUG {
fmt.Printf("\n=== x1 ===\n");
fmt.Printf(" hash is h0 = %x\n", h0)
fmt.Printf(" time id is n = %x[%d]\n", timeIdBytes, timeId)
fmt.Printf(" h(t) = %x\n", hashedTimeId)
fmt.Printf(" ---\n");
fmt.Printf(" x1 is h0+h(t) = %x\n", x1)
fmt.Printf(" ---\n");
fmt.Printf(" check x1+h(t) eq h0 = %x\n", xor.ByteArray(x1, hashedTimeId))
fmt.Printf(" check x1+h0 eq h(t) = %x\n", xor.ByteArray(x1, h0));
}
if DEBUG {
fmt.Printf("\n=== x1 ===\n")
fmt.Printf(" hash is h0 = %x\n", h0)
fmt.Printf(" time id is n = %x[%d]\n", timeIdBytes, timeId)
fmt.Printf(" h(t) = %x\n", hashedTimeId)
fmt.Printf(" ---\n")
fmt.Printf(" x1 is h0+h(t) = %x\n", x1)
fmt.Printf(" ---\n")
fmt.Printf(" check x1+h(t) eq h0 = %x\n", xor.ByteArray(x1, hashedTimeId))
fmt.Printf(" check x1+h0 eq h(t) = %x\n", xor.ByteArray(x1, h0))
}

/* (2) Calculate x2 = h ^ h(timeId) ^ timeMod */
x2 = xor.ByteArray(h1, hashedTimeId)

// do not add time mod if not code = 0
if c.key.MigrationCode() != 0 {
return nil;
// only add time mod if code = 0
if c.key.MigrationCode() == 0 {
x2[0] = xor.Byte(x2[0], byte(timeMod))
}

x2[0] = xor.Byte(x2[0], byte(timeMod))
if DEBUG {
fmt.Printf("\n=== x2 ===\n")
fmt.Printf(" next is h1 = %x\n", h1)
fmt.Printf(" time mod is m = %x[%d]\n", timeMod, timeMod)
fmt.Printf(" h(t) = %x\n", hashedTimeId)
fmt.Printf(" ---\n")
fmt.Printf(" x2 is h1+h(t)+m = %x\n", x2)
fmt.Printf(" ---\n")
fmt.Printf(" check x2+x1 %% 2 eq m = %d (%t)\n", uint8(xor.ByteArray(x1, x2)[0]%2), xor.ByteArray(x1, x2)[0]%2 == byte(timeMod))
fmt.Printf(" check x2+x1 %% 3 eq o = %d (%t)\n", uint8(xor.ByteArray(x1, x2)[1]%3), uint8(xor.ByteArray(x1, x2)[1]%3) == c.key.MigrationCode())
}

if DEBUG {
fmt.Printf("\n=== x2 ===\n");
fmt.Printf(" next is h1 = %x\n", h1)
fmt.Printf(" time mod is m = %x[%d]\n", timeMod, timeMod)
fmt.Printf(" h(t) = %x\n", hashedTimeId)
fmt.Printf(" ---\n");
fmt.Printf(" x2 is h1+h(t)+m = %x\n", x2)
fmt.Printf(" ---\n");
fmt.Printf(" check x2+x1 %% 2 eq m = %d (%t)\n", uint8(xor.ByteArray(x1,x2)[0] % 2), xor.ByteArray(x1,x2)[0] % 2 == byte(timeMod))
fmt.Printf(" check x2+x1 %% 3 eq o = %d (%t)\n", uint8(xor.ByteArray(x1,x2)[1] % 3), uint8(xor.ByteArray(x1,x2)[1] % 3) == c.key.MigrationCode());
}

return nil;
return nil
}

/* (5) Rescue management
*
* @Y1<[]byte> First rescue parameter
@@ -281,19 +285,21 @@ func (c *T) generateRequest(x1 []byte, x2 []byte) error {
func (c *T) rescue(y1 []byte, y2 []byte) error {

/* (1) Extract time mod */
timeMod := uint32( xor.ByteArray(y1, y2)[0] % 2 )
timeMod := uint32(xor.ByteArray(y1, y2)[0] % 2)

/* (2) Try to guess time id from timeM */
timeId := timeid.Guess(c.ctx.Window(), timeMod);
var timeIdBytes []byte;
timeId := timeid.Guess(c.ctx.Window(), timeMod)
var timeIdBytes []byte
binary.BigEndian.PutUint32(timeIdBytes, timeId)

/* (3) Hash timeId */
hashedTimeId, err := scha.Hash(timeIdBytes, 1, nil, nil);
if err != nil { return err }
hashedTimeId, err := scha.Hash(timeIdBytes, 1, nil, nil)
if err != nil {
return err
}

/* (4) Get the received hash */
receivedHash := xor.ByteArray(y1, hashedTimeId);
receivedHash := xor.ByteArray(y1, hashedTimeId)

/* (4) Try to rescue the key */
err = c.key.Rescue(receivedHash)
@@ -1,30 +1,28 @@
package keyset

import (
"git.xdrm.io/schastsp/context"
"io"
"encoding/binary"
"errors"
"git.xdrm.io/schastsp/context"
"git.xdrm.io/schastsp/lib/scha"
"io"
)

const SecretSize = scha.HSIZE * 4;
const SecretSize = scha.HSIZE * 4

/* Attributes */
type T struct {
ctx *context.T // current context
depth uint16 // cur depth
ctx *context.T // current context
depth uint16 // cur depth

sec []byte // secret
mcode uint8 // migration code (secret renewal)
sec []byte // secret
mcode uint8 // migration code (secret renewal)
// 0: none
// 1: need to migrate
// 2: waiting migration
// 3: validated migration
}

/* (1) Creates a new KeySet
*
* @ctx<Context> Context constants
@@ -51,15 +49,12 @@ func Create(ctx *context.T) (*T, error) {
return instance, nil
}

/* (2) Get current hash
*
* @return digest<[]byte]> Current hash representing the set
*
---------------------------------------------------------*/
func (s T) Hash() ([]byte, error) {
func (s T) CurrentHash() ([]byte, error) {

/* (1) Get digest */
digest, err := scha.Hash(s.sec, uint(s.depth), nil, nil)

@@ -74,8 +69,6 @@ func (s T) Hash() ([]byte, error) {

}

/* (3) Decrement depth
*
* @return remaining<uint> Remaining hashes before migration
@@ -96,8 +89,6 @@ func (s *T) Decrement() uint16 {

}

/* (4) Serialisation
*
* @return serial<string> String representation
@@ -116,30 +107,36 @@ func (s *T) Decrement() uint16 {
---------------------------------------------------------*/
func (s *T) Store(writer io.Writer) error {

var err error;
var err error

/* (1) Copy secret size */
err = binary.Write(writer, binary.BigEndian, uint16(len(s.sec)))
if err != nil { return err }
if err != nil {
return err
}

/* (2) Copy secret */
err = binary.Write(writer, binary.BigEndian, s.sec)
if err != nil { return err }
if err != nil {
return err
}

/* (3) Copy depth */
err = binary.Write(writer, binary.BigEndian, s.depth)
if err != nil { return err }
if err != nil {
return err
}

/* (4) Copy migration code */
err = binary.Write(writer, binary.BigEndian, s.mcode)
if err != nil { return err }
if err != nil {
return err
}

return nil

}

/* (5) Builds a KeySet from its serial representation
*
* @serial<string> String representation
@@ -155,7 +152,9 @@ func (s *T) Fetch(reader io.Reader) error {

/* (1) Read the secret size */
err = binary.Read(reader, binary.BigEndian, &secretLength)
if err != nil { return err }
if err != nil {
return err
}

/* (2) Fail if secretLength lower than digest size */
if secretLength < scha.HSIZE {

@@ -163,9 +162,11 @@ func (s *T) Fetch(reader io.Reader) error {
}

/* (3) Try to copy the secret */
s.sec = make([]byte, secretLength);
s.sec = make([]byte, secretLength)
err = binary.Read(reader, binary.BigEndian, &s.sec)
if err != nil { return err }
if err != nil {
return err
}

/* (4) Manage invalid secret size (mismatch secretLength) */
if uint16(len(s.sec)) != secretLength {

@@ -174,17 +175,20 @@ func (s *T) Fetch(reader io.Reader) error {

/* (5) Try to copy the depth */
err = binary.Read(reader, binary.BigEndian, &s.depth)
if err != nil { return err }
if err != nil {
return err
}

/* (6) Try to copy the migration code */
err = binary.Read(reader, binary.BigEndian, &s.mcode)
if err != nil { return err }
if err != nil {
return err
}

return nil

}

/* (6) Getter/Setter for migration code 'mcode'
*
* @mcode<uint8> [OPT] New value
@@ -192,7 +196,7 @@ func (s *T) Fetch(reader io.Reader) error {
* @return mcode<uint8> Migration code
*
---------------------------------------------------------*/
func (s T) MigrationCode(optional... uint8) uint8 {
func (s T) MigrationCode(optional ...uint8) uint8 {

/* (1) If no valid code given -> return current one */
if len(optional) < 1 || optional[0] > 3 {

@@ -207,8 +211,6 @@ func (s T) MigrationCode(optional... uint8) uint8 {

}

/* (7) Updates depth for rescuing from desynchroisation
*
* @lastHash<[]byte> Last received hash
@@ -225,14 +227,18 @@ func (s *T) Rescue(lastHash []byte) error {

/* (1) Browse possible values
---------------------------------------------------------*/
for i := s.depth ; i <= s.depth+s.ctx.MinDepth() ; i++ {
for i := s.depth; i <= s.depth+s.ctx.MinDepth(); i++ {

/* (1) Process hash */
currentHash, err := s.Hash();
if err != nil { return err }
currentHash, err := s.CurrentHash()
if err != nil {
return err
}

/* (2) If not found -> try again */
if string(currentHash) != string(lastHash) { continue }
if string(currentHash) != string(lastHash) {
continue
}

/* (3) Store new depth */
s.depth = i - s.ctx.MinDepth() + 1

@@ -244,7 +250,6 @@ func (s *T) Rescue(lastHash []byte) error {

}

return errors.New("Cannot find an available rescue depth");

return errors.New("Cannot find an available rescue depth")

}
@@ -6,15 +6,19 @@ import (
"git.xdrm.io/schastsp/lib/scha"
"testing"
)

func TestGenerationDepthBoundaries(t *testing.T) {

var min, max uint16 = 0x0f0, 0xfff
var rangeMin = min + (max-min)/2
var rangeMax = max
var created *T;
var created *T

ctx, err := context.Create(2.5, min, 0, max);
if err != nil { t.Errorf("Do not expected an error: %s", err); return }
ctx, err := context.Create(2.5, min, 0, max)
if err != nil {
t.Errorf("Do not expected an error: %s", err)
return
}

created, err = Create(ctx)

@@ -29,8 +33,11 @@ func TestGenerationDepthBoundaries(t *testing.T) {
}

func TestSchaDecrementingProperty(t *testing.T) {
ctx, err := context.Create(2.5);
if err != nil { t.Errorf("Do not expected an error"); return }
ctx, err := context.Create(2.5)
if err != nil {
t.Errorf("Do not expected an error")
return
}

var h1, h2, hcheck []byte
var created *T

@@ -42,7 +49,7 @@ func TestSchaDecrementingProperty(t *testing.T) {
}

/* (1) Get current hash */
h1, err = created.Hash()
h1, err = created.CurrentHash()
if err != nil {
t.Errorf("Do not expected an error, got: %s", err)
}

@@ -51,7 +58,7 @@ func TestSchaDecrementingProperty(t *testing.T) {
created.Decrement()

/* (3) Get new hash */
h2, err = created.Hash()
h2, err = created.CurrentHash()
if err != nil {
t.Errorf("Do not expected an error, got: %s", err)
}

@@ -75,8 +82,11 @@ func TestSchaDecrementingProperty(t *testing.T) {
}

func TestDecrementMinimum(t *testing.T) {
ctx, err := context.Create(2.5);
if err != nil { t.Errorf("Do not expected an error"); return }
ctx, err := context.Create(2.5)
if err != nil {
t.Errorf("Do not expected an error")
return
}

var h1, h2, hcheck []byte
var created *T

@@ -88,7 +98,7 @@ func TestDecrementMinimum(t *testing.T) {
}

/* (1) Get current hash */
h1, err = created.Hash()
h1, err = created.CurrentHash()
if err != nil {
t.Errorf("Do not expected an error, got: %s", err)
}

@@ -97,7 +107,7 @@ func TestDecrementMinimum(t *testing.T) {
created.Decrement()

/* (3) Get new hash */
h2, err = created.Hash()
h2, err = created.CurrentHash()
if err != nil {
t.Errorf("Do not expected an error, got: %s", err)
}

@@ -120,49 +130,57 @@ func TestDecrementMinimum(t *testing.T) {

}

func TestRestore(t *testing.T) {

func TestRestore(t *testing.T){

var buffer, srcData bytes.Buffer;
var src, dst *T;
var ctx *context.T;
var err error;
var buffer, srcData bytes.Buffer
var src, dst *T
var ctx *context.T
var err error

/* (1) Create a context */
ctx, err = context.Create(2.2);
if err != nil { t.Fatalf("[Unexpected error] %s", err); }
ctx, err = context.Create(2.2)
if err != nil {
t.Fatalf("[Unexpected error] %s", err)
}

/* (2) Create our source */
src, err = Create(ctx);
if err != nil { t.Fatalf("[Unexpected error] %s", err); }
src, err = Create(ctx)
if err != nil {
t.Fatalf("[Unexpected error] %s", err)
}

/* (3) Store it to buffer (+to another buffer to check after) */
buffer.Reset(); srcData.Reset()
err = src.Store(&buffer);
if err != nil { t.Fatalf("[Unexpected error] %s", err); }
err = src.Store(&srcData);
if err != nil { t.Fatalf("[Unexpected error] %s", err); }
buffer.Reset()
srcData.Reset()
err = src.Store(&buffer)
if err != nil {
t.Fatalf("[Unexpected error] %s", err)
}
err = src.Store(&srcData)
if err != nil {
t.Fatalf("[Unexpected error] %s", err)
}

/* (4) Create our target */
dst, err = Create(ctx);
if err != nil { t.Fatalf("[Unexpected error] %s", err); }
dst, err = Create(ctx)
if err != nil {
t.Fatalf("[Unexpected error] %s", err)
}

/* (5) Restore from buffer */
err = dst.Fetch(&buffer);
if err != nil { t.Fatalf("[Unexpected error] %s", err); }
err = dst.Fetch(&buffer)
if err != nil {
t.Fatalf("[Unexpected error] %s", err)
}

/* (6) Get both data */
dst.Store(&buffer) // dst data is in buffer
if err != nil { t.Fatalf("[Unexpected error] %s", err); }

if len(buffer.Bytes()) != len(srcData.Bytes()) || buffer.String() != srcData.String() {
t.Fatalf("Expected restored keyset to be equal to original ;\n - expected %x\n - got %x", srcData.String(), buffer.String());
if err != nil {
t.Fatalf("[Unexpected error] %s", err)
}

if len(buffer.Bytes()) != len(srcData.Bytes()) || buffer.String() != srcData.String() {
t.Fatalf("Expected restored keyset to be equal to original ;\n - expected %x\n - got %x", srcData.String(), buffer.String())
}

}