this float decode has to be wrong

master
Jordan Orelli 8 years ago
parent d3e5c51d9d
commit 215573c039

@@ -75,7 +75,7 @@ func (d *Dict) createEntity(id int) error {
     Debug.Printf("create entity id: %d classId: %d className: %v class: %v\n", id, classId, className, class)
     e := class.New(serial)
     d.entities[id] = e
-    return fillSlots(e, d.sr, d.br)
+    return fillSlots(e, class.Name.String(), d.sr, d.br)
 }
 func (d *Dict) getEntity(id int) *Entity {
@@ -200,7 +200,7 @@ func (d *Dict) syncBaselines() {
         d.br.SetSource(e.Value)
         Debug.Printf("syncBaselines has new baseline for class %v", c)
-        if err := fillSlots(c.baseline, d.sr, d.br); err != nil {
+        if err := fillSlots(c.baseline, c.Name.String(), d.sr, d.br); err != nil {
             Debug.Printf("syncBaselines failed to fill a baseline: %v", err)
             continue
         }

@@ -6,6 +6,8 @@ type Entity struct {
     slots []interface{}
 }
-func (e *Entity) getSlotValue(n int) interface{} { return e.slots[n] }
+func (e *Entity) slotName(n int) string { return e.Class.Fields[n].name.String() }
+func (e *Entity) slotType(n int) string { return e.Class.Fields[n]._type.String() }
+func (e *Entity) slotValue(n int) interface{} { return e.slots[n] }
+func (e *Entity) slotDecoder(n int) decoder { return e.Class.Fields[n].decoder }
 func (e *Entity) setSlotValue(n int, v interface{}) { e.slots[n] = v }
-func (e *Entity) getSlotDecoder(n int) decoder { return e.Class.Fields[n].decoder }

@@ -14,7 +14,7 @@ type Field struct {
     bits uint // number of bits used to encode field?
     low float32 // lower limit of field values
     high float32 // upper limit of field values
-    flags int // dunno what these flags do
+    flags int // used by float decoder
     serializer *Symbol // class on which the field was defined
     serializerVersion *int32 // version of the class on which the field was defined
     class *Class // source class on which the field was originally defined

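To make the quantization members above concrete: a field with bits = 10, low = 0, and high = 102.3 (illustrative numbers, not taken from any real replay) has 2^10 - 1 = 1023 steps across its range, so a raw on-the-wire value of 512 decodes to 0 + 512/1023 * 102.3 ≈ 51.2. That is exactly the low + u * inv_steps * span scaling the float decoder below performs.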
@@ -7,10 +7,9 @@ import (
 )
 const (
-    f_round_down = 1 << iota
-    f_round_up
-    f_encode_zero
-    f_encode_ints
+    f_min = 1 << iota
+    f_max
+    f_center
 )
 func floatDecoder(f *Field) decoder {
@@ -24,13 +23,22 @@ func floatDecoder(f *Field) decoder {
         panic("quantization rules make no sense")
     }
+    bits := f.bits
+    low := f.low
+    high := f.high
+    flags := f.flags
+
+    // there's a flag that's -8 and i don't know what to do with it. I'm just
+    // gonna mask away everything except the three least significant bits and
+    // pray for the best.
+    flags = flags & 7
     // number of input steps
-    // steps := int(1<<f.bits - 1)
+    steps := int(1<<f.bits - 1)
     // keep the inverse to mult instead of divide later
-    // inv_steps := 1.0 / float32(steps)
+    inv_steps := 1.0 / float32(steps)
     // total range of values
     span := f.high - f.low
@@ -39,27 +47,36 @@ func floatDecoder(f *Field) decoder {
         panic("quantization span is backwards")
     }
-    if flags&f_round_down&f_round_up > 0 {
-        panic("how can you round down and up at the same time")
-    }
     // output width of each step
-    // step_width := span * inv_steps
+    step_width := span * inv_steps
-    return func(br bit.Reader) interface{} {
-        if flags&f_round_down > 0 {
-            return nil
-        }
-        if flags&f_round_up > 0 {
-            panic("round up flag not done yet")
-        }
-        if flags&f_encode_zero > 0 {
-            panic("encode zero flag not done yet")
-        }
-        if flags&f_encode_ints > 0 {
-            panic("encode ints flag not done yet")
-        }
-        return nil
-    }
+    var special *float32
+    switch {
+    case flags&f_min > 0:
+        special = new(float32)
+        *special = low
+    case flags&f_max > 0:
+        special = new(float32)
+        *special = high
+    case flags&f_center > 0:
+        special = new(float32)
+        middle := (high + low) * 0.5
+        // if we're within a step of zero just return zero.
+        if middle > 0 && middle-step_width < 0 || middle < 0 && middle+step_width > 0 {
+            middle = 0
+        }
+        *special = middle
+    }
+    return func(br bit.Reader) interface{} {
+        if special != nil && bit.ReadBool(br) {
+            Debug.Printf("decode float type: %s low: %f high: %f bits: %d steps: %d span: %f flags: %d special: %v", f._type.String(), low, high, bits, steps, span, flags, *special)
+            return *special
+        }
+        u := br.ReadBits(bits)
+        out := low + float32(u)*inv_steps*span
+        Debug.Printf("decode float type: %s low: %f high: %f bits: %d bitVal: %d steps: %d span: %f flags: %d output: %v", f._type.String(), low, high, bits, u, steps, span, flags, out)
+        return out
+    }
 }

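Read on its own, the new decoder does two things: if one of the flag bits is set it precomputes a "special" value (low for f_min, high for f_max, the midpoint for f_center), and at decode time a single leading bit says "use the special value"; otherwise it reads the raw quantized bits and scales them into [low, high]. Below is a minimal, self-contained sketch of that same idea with a fake bit reader so it runs outside the replay parser; fakeReader, readBool, and readBits are stand-ins for the real bit.Reader, and the midpoint snap-to-zero from the real code is left out.

package main

import "fmt"

const (
    f_min = 1 << iota
    f_max
    f_center
)

// fakeReader hands back pre-baked values so the sketch runs without a replay.
type fakeReader struct {
    vals []uint64
    pos  int
}

func (r *fakeReader) readBool() bool {
    v := r.vals[r.pos]
    r.pos++
    return v != 0
}

func (r *fakeReader) readBits(n uint) uint64 {
    v := r.vals[r.pos]
    r.pos++
    return v
}

// decodeFloat mirrors the shape of the new floatDecoder: an optional leading
// bit selects a precomputed special value, otherwise raw bits are scaled.
func decodeFloat(r *fakeReader, bits uint, low, high float32, flags int) float32 {
    steps := float32(uint64(1)<<bits - 1)
    span := high - low

    var special *float32
    switch {
    case flags&f_min > 0:
        special = &low
    case flags&f_max > 0:
        special = &high
    case flags&f_center > 0:
        middle := (high + low) * 0.5 // the real code also snaps this to 0 when within a step of 0
        special = &middle
    }

    if special != nil && r.readBool() {
        return *special
    }
    u := r.readBits(bits)
    return low + float32(u)/steps*span
}

func main() {
    // leading bit 1 with f_min set: the special (low) value comes back
    fmt.Println(decodeFloat(&fakeReader{vals: []uint64{1}}, 10, 0, 102.3, f_min))
    // leading bit 0: fall through and scale the next 10 bits across [0, 102.3]
    fmt.Println(decodeFloat(&fakeReader{vals: []uint64{0, 1023}}, 10, 0, 102.3, f_min))
}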
@@ -16,17 +16,16 @@ type selection struct {
     vals [6]int
 }
 func (s selection) String() string { return fmt.Sprint(s.path()) }
 func (s selection) path() []int { return s.vals[:s.count] }
-func (s selection) fill(offset int, dest slotted, br bit.Reader) error {
+func (s selection) fill(offset int, displayPath string, dest slotted, br bit.Reader) error {
     slot := s.vals[offset]
     Debug.Printf("fill selection %v", s)
     switch s.count - offset {
     case 0:
         panic("selection makes no sense")
     case 1:
-        fn := dest.getSlotDecoder(slot)
+        fn := dest.slotDecoder(slot)
         if fn == nil {
             switch v := dest.(type) {
             case *Entity:
@@ -36,18 +35,17 @@ func (s selection) fill(offset int, dest slotted, br bit.Reader) error {
             }
         }
         val := fn(br)
-        old := dest.getSlotValue(slot)
+        old := dest.slotValue(slot)
         dest.setSlotValue(slot, val)
-        Debug.Printf("%v -> %v", old, val)
+        Debug.Printf("%s %s (%s): %v -> %v", s, fmt.Sprintf("%s.%s", displayPath, dest.slotName(slot)), dest.slotType(slot), old, val)
         return nil
     default:
         Debug.Printf("fill child selection...")
-        v := dest.getSlotValue(slot)
+        v := dest.slotValue(slot)
         vs, ok := v.(slotted)
         if !ok {
             return fmt.Errorf("child selection refers to a slot that doesn't contain a slotted value")
         }
-        return s.fill(offset+1, vs, br)
+        return s.fill(offset+1, fmt.Sprintf("%s.%s", displayPath, dest.slotName(slot)), vs, br)
     }
 }

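The point of the new displayPath argument is just logging: each recursive fill call appends the slot's name, so the debug line can show a dotted path from the class down to the field that changed. A tiny illustration with invented names:

package main

import "fmt"

func main() {
    // fillSlots starts the path off with the class name ...
    displayPath := "CSomeClass"
    // ... and each level of a child selection appends its slot's name, the
    // same way fill does with fmt.Sprintf("%s.%s", displayPath, dest.slotName(slot)).
    displayPath = fmt.Sprintf("%s.%s", displayPath, "outerSlot")
    displayPath = fmt.Sprintf("%s.%s", displayPath, "innerSlot")
    fmt.Println(displayPath) // CSomeClass.outerSlot.innerSlot
}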
@@ -5,19 +5,21 @@ import (
 )
 type slotted interface {
-    getSlotValue(int) interface{}
+    slotName(int) string
+    slotValue(int) interface{}
+    slotType(int) string
+    slotDecoder(int) decoder
     setSlotValue(int, interface{})
-    getSlotDecoder(int) decoder
 }
-func fillSlots(dest slotted, sr *selectionReader, br bit.Reader) error {
+func fillSlots(dest slotted, displayPath string, sr *selectionReader, br bit.Reader) error {
     selections, err := sr.readSelections(br, htree)
     if err != nil {
         return err
     }
     for _, s := range selections {
-        if err := s.fill(0, dest, br); err != nil {
+        if err := s.fill(0, displayPath, dest, br); err != nil {
             return err
         }
     }

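For reference, a throwaway type that satisfies the revised slotted interface, just to spell out the method set a fill destination now has to provide; the decoder type here is a stand-in (the real one reads from a bit.Reader) and the slot data is hard-coded.

package main

import "fmt"

type decoder func() interface{} // stand-in; the real decoder pulls bits from a bit.Reader

type slotted interface {
    slotName(int) string
    slotValue(int) interface{}
    slotType(int) string
    slotDecoder(int) decoder
    setSlotValue(int, interface{})
}

// toy is a hard-coded slot table, nothing like the real Entity.
type toy struct {
    names  []string
    types  []string
    values []interface{}
}

func (t *toy) slotName(n int) string             { return t.names[n] }
func (t *toy) slotType(n int) string             { return t.types[n] }
func (t *toy) slotValue(n int) interface{}       { return t.values[n] }
func (t *toy) slotDecoder(n int) decoder         { return func() interface{} { return 42 } }
func (t *toy) setSlotValue(n int, v interface{}) { t.values[n] = v }

func main() {
    var dest slotted = &toy{
        names:  []string{"health"},
        types:  []string{"int32"},
        values: []interface{}{nil},
    }
    old := dest.slotValue(0)
    val := dest.slotDecoder(0)()
    dest.setSlotValue(0, val)
    // roughly the shape of the new debug line: path.slot (type): old -> new
    fmt.Printf("Demo.%s (%s): %v -> %v\n", dest.slotName(0), dest.slotType(0), old, val)
}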