
Commit

- added unit tests for Torch ASCII save files
- fixed various issues related to parsing Torch
lessthanoptimal committed May 12, 2016
1 parent 0dd9cd9 commit b221550
Showing 17 changed files with 105 additions and 92 deletions.
@@ -188,11 +188,10 @@ private TorchTensor parseTensor() throws IOException {
}
} else {
int a = readS32();
- int b = readS32();
- int c = readS32();
+ long b = readS64();

if( verbose )
- System.out.println(" no dimension. Weird variable "+a+" "+b+" "+c);
+ System.out.println(" no dimension. Weird variable "+a+" "+b);
}
return t;
}
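For context, the "weird variable" trailer is the same eight bytes either way; the fix reads them as one 64-bit value instead of two 32-bit values. The actual `readS64()` implementation is not shown in this diff; a hypothetical sketch for the binary reader, assuming the little-endian byte order used elsewhere in the parser:

```java
// Hypothetical readS64() for the binary reader: two unsigned 32-bit words,
// low word first. Masking with 0xFFFFFFFFL stops the low word's sign bit
// from smearing into the upper half of the long.
public long readS64() throws IOException {
    long low  = readS32() & 0xFFFFFFFFL;
    long high = readS32() & 0xFFFFFFFFL;
    return (high << 32) | low;
}
```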
@@ -269,7 +268,7 @@ private TorchObject parseTable() throws IOException {
} else if( o_key instanceof TorchNumber ) {
key = ((TorchNumber)o_key).value;
} else {
- throw new RuntimeException("Add support");
+ throw new RuntimeException("Add support for "+o_key);
}

TorchObject value = parseNext(true);
27 changes: 23 additions & 4 deletions modules/io/src/main/java/deepboof/io/torch7/ParseAsciiTorch7.java
@@ -94,17 +94,26 @@ public void readArrayDouble(int size, double[] storage) throws IOException {
if( words.length != size )
throw new IOException("Unexpected number of words "+size+" found "+words.length);
for (int i = 0; i < size; i++) {
- storage[i] = Double.parseDouble(words[i]);
+ if( words[i].endsWith("nan"))
+     storage[i] = Double.NaN;
+ else
+     storage[i] = Double.parseDouble(words[i]);
}
// int foo = input.readByte();
}
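Why the special case: `Double.parseDouble` accepts only the exact spelling `"NaN"`, so the lowercase `nan`/`-nan` tokens that Torch7's C-based ASCII writer emits throw `NumberFormatException`; the `endsWith("nan")` guard catches both spellings. A standalone sketch of the same logic:

```java
// Torch7's ASCII format prints NaN the way C's printf does ("nan"/"-nan"),
// which Double.parseDouble rejects; map those tokens to Java's NaN first.
static double parseTorchDouble(String word) {
    if (word.endsWith("nan"))
        return Double.NaN;
    return Double.parseDouble(word);
}
```

`Float.parseFloat` uses the same grammar, so the new float path below would hit the same exception on tensors containing NaN.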

@Override
public void readArrayFloat(int size, float[] storage) throws IOException {
- // for (int i = 0; i < size; i++) {
- //     storage[i] = readFloat();
- // }
- // input.readByte();
+ String line = readInnerString();
+ String words[] = line.split(" ");
+ if( words.length != size )
+     throw new IOException("Unexpected number of words "+size+" found "+words.length);
for (int i = 0; i < size; i++) {
- storage[i] = readFloat();
+ storage[i] = Float.parseFloat(words[i]);
}
+ input.readByte();
}

@Override
@@ -132,7 +141,17 @@ private String readInnerString() throws IOException {
break;
}
buffer[length++] = (byte)value;

+ if( buffer.length == length ) {
+     growBuffer();
+ }
}
return new String(buffer,0,length);
}

+ private void growBuffer() {
+     byte tmp[] = new byte[ buffer.length + 1024];
+     System.arraycopy(buffer,0,tmp,0,buffer.length);
+     buffer = tmp;
+ }
}
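An aside on the growth policy: a fixed 1024-byte increment means copying work grows quadratically with string length, while geometric growth keeps appends amortized constant. A doubling variant, hypothetical and not part of this commit:

```java
// Doubling instead of adding a fixed 1024 bytes makes total copying linear
// in the final string length; the Math.max guard handles a zero-length buffer.
private void growBuffer() {
    byte[] tmp = new byte[Math.max(64, buffer.length * 2)];
    System.arraycopy(buffer, 0, tmp, 0, buffer.length);
    buffer = tmp;
}
```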
@@ -126,16 +126,16 @@ public void readArrayDouble(int size, double[] storage) throws IOException {
if( littleEndian ) {
int idx = 0;
for (int i = 0; i < size; i++, idx += 8) {
- long a = (tmp[idx]&0xFF) | (tmp[idx+1]&0xFF)<<8 | (tmp[idx+2]&0xFF)<<16 | (tmp[idx+3]&0xFF) << 24;
- long b = (tmp[idx+4]&0xFF) | (tmp[idx+5]&0xFF)<<8 | (tmp[idx+6]&0xFF)<<16 | (tmp[idx+7]&0xFF) << 24;
+ long a = (tmp[idx]&0xFF) | (tmp[idx+1]&0xFF)<<8 | (tmp[idx+2]&0xFF)<<16 | (long)(tmp[idx+3]&0xFF) << 24L;
+ long b = (tmp[idx+4]&0xFF) | (tmp[idx+5]&0xFF)<<8 | (tmp[idx+6]&0xFF)<<16 | (long)(tmp[idx+7]&0xFF) << 24;

storage[i] = Double.longBitsToDouble(b << 32 | a );
}
} else {
int idx = 0;
for (int i = 0; i < size; i++, idx += 8) {
- int a = (tmp[idx+3]&0xFF) | (tmp[idx+2]&0xFF)<<8 | (tmp[idx+1]&0xFF)<<16 | (tmp[idx]&0xFF) << 24;
- long b = (tmp[idx+7]&0xFF) | (tmp[idx+6]&0xFF)<<8 | (tmp[idx+5]&0xFF)<<16 | (tmp[idx+4]&0xFF) << 24;
+ long a = (tmp[idx+3]&0xFF) | (tmp[idx+2]&0xFF)<<8 | (tmp[idx+1]&0xFF)<<16 | (long)(tmp[idx]&0xFF) << 24;
+ long b = (tmp[idx+7]&0xFF) | (tmp[idx+6]&0xFF)<<8 | (tmp[idx+5]&0xFF)<<16 | (long)(tmp[idx+4]&0xFF) << 24;

storage[i] = Double.longBitsToDouble(a << 32 | b );
}
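The bug these casts fix is classic sign extension: `(tmp[idx+3]&0xFF) << 24` is evaluated in 32-bit int arithmetic, so a top byte of 0x80 or above yields a negative int whose sign bit smears across the upper 32 bits when widened to long. The same change also repairs the big-endian branch, where `a` was previously an int, and `a << 32` on an int is a no-op because int shift distances are taken mod 32. A self-contained demonstration:

```java
// Demonstrates the two int-arithmetic pitfalls fixed above: sign extension
// on widening to long, and shift distances taken mod 32 for int operands.
public class SignExtendDemo {
    public static void main(String[] args) {
        byte top = (byte) 0x80;                 // high bit set
        long bad  = (top & 0xFF) << 24;         // int math: 0xFFFFFFFF80000000
        long good = (long)(top & 0xFF) << 24;   // long math: 0x0000000080000000
        System.out.printf("bad =%016X%ngood=%016X%n", bad, good);

        int a = 0x12345678;
        System.out.printf("int  a<<32 = %08X%n", a << 32);         // unchanged: shift is mod 32
        System.out.printf("long a<<32 = %016X%n", (long) a << 32); // actually shifted
    }
}
```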
@@ -130,34 +130,48 @@ private void checkFunction(String directory , Class functionClass ) {
if( !d.isDirectory() )
continue;

- Tensor input = convert(read(new File(d,"input")));
- FunctionAndParameters fap = convert(read(new File(d,"operation")));
- Tensor expected = convert(read(new File(d,"output")));

- if( fap != null ) {
-     Function function = fap.getFunction();
-     if (functionClass != null)
-         assertTrue("Unexpected class type. " + function.getClass().getSimpleName(),
-                 function.getClass() == functionClass);

-     int N = input.length(0);

-     function.initialize(TH(input.getShape()));
-     Tensor found = input.create(WI(N, function.getOutputShape()));
-     function.setParameters(fap.parameters);
-     function.forward(input, found);
-     DeepUnitTest.assertEquals(expected,found, Accuracy.STANDARD);
- } else {
-     // there is no function, so the function is supposed to be pointless and input should be
-     // the same as output
-     DeepUnitTest.assertEquals(expected,input, Accuracy.STANDARD);
- }
+ Tensor input;FunctionAndParameters fap;Tensor expected;

+ // First test the binary parser
+ input = convert(readBinary(new File(d,"input")));
+ fap = convert(readBinary(new File(d,"operation")));
+ expected = convert(readBinary(new File(d,"output")));

+ checkFunction(functionClass, input, fap, expected);

+ // Now test the ASCII parser
+ input = convert(readAscii(new File(d,"input_ascii")));
+ fap = convert(readAscii(new File(d,"operation_ascii")));
+ expected = convert(readAscii(new File(d,"output_ascii")));

+ checkFunction(functionClass, input, fap, expected);
count++;
}
assertTrue(count>0);
}

+ private void checkFunction(Class functionClass, Tensor input, FunctionAndParameters fap, Tensor expected) {
+     if( fap != null ) {
+         Function function = fap.getFunction();
+         if (functionClass != null)
+             assertTrue("Unexpected class type. " + function.getClass().getSimpleName(),
+                     function.getClass() == functionClass);

+         int N = input.length(0);

+         function.initialize(TH(input.getShape()));
+         Tensor found = input.create(WI(N, function.getOutputShape()));
+         function.setParameters(fap.parameters);
+         function.forward(input, found);
+         DeepUnitTest.assertEquals(expected,found, Accuracy.STANDARD);
+     } else {
+         // there is no function, so the function is supposed to be pointless and input should be
+         // the same as output
+         DeepUnitTest.assertEquals(expected,input, Accuracy.STANDARD);
+     }
+ }

private void checkSequence(String directory ) {
File pathToOp = new File(pathToData,directory);

@@ -166,9 +180,9 @@ private void checkSequence(String directory ) {
if( !d.isDirectory() )
continue;

- Tensor input = convert(read(new File(d,"input")));
- SequenceAndParameters sap = convert(read(new File(d,"operation")));
- Tensor expected = convert(read(new File(d,"output")));
+ Tensor input = convert(readBinary(new File(d,"input")));
+ SequenceAndParameters sap = convert(readBinary(new File(d,"operation")));
+ Tensor expected = convert(readBinary(new File(d,"output")));

ForwardSequence forward = new ForwardSequence(sap.sequence,sap.type);

@@ -185,7 +199,7 @@
assertTrue(count>0);
}

- private <T extends TorchObject>T read( File path ) {
+ private <T extends TorchObject>T readBinary(File path ) {
try {
List<TorchObject> found = new ParseBinaryTorch7().parse(path);
assertEquals(1,found.size());
@@ -194,4 +208,14 @@ private <T extends TorchObject>T read( File path ) {
throw new RuntimeException(e);
}
}

+ private <T extends TorchObject>T readAscii(File path ) {
+     try {
+         List<TorchObject> found = new ParseAsciiTorch7().setVerbose(true).parse(path);
+         assertEquals(1,found.size());
+         return (T)found.get(0);
+     } catch (IOException e) {
+         throw new RuntimeException(e);
+     }
+ }
}
10 changes: 2 additions & 8 deletions modules/io/src/test/torch7/GenerateBatchNorm.lua
@@ -28,10 +28,7 @@ for k,data_type in pairs(boof.float_types) do
operation:evaluate()
local output = operation:forward(input)

- -- No need to explicitly save the weight and bias because it is saved with the operation
- torch.save(paths.concat(output_dir,'input'), input)
- torch.save(paths.concat(output_dir,'operation'), operation)
- torch.save(paths.concat(output_dir,'output'), output)
+ boof.save(output_dir,input,operation,output)

------------------------------------------------------------------------
-- The same but with gamma+beta (a.k.a. weight and bias)
@@ -47,8 +44,5 @@ for k,data_type in pairs(boof.float_types) do
operation:evaluate()
output = operation:forward(input)

- -- No need to explicitly save the weight and bias because it is saved with the operation
- torch.save(paths.concat(output_dir,'input'), input)
- torch.save(paths.concat(output_dir,'operation'), operation)
- torch.save(paths.concat(output_dir,'output'), output)
+ boof.save(output_dir,input,operation,output)
end
5 changes: 1 addition & 4 deletions modules/io/src/test/torch7/GenerateCudaLinear.lua
@@ -37,8 +37,5 @@ operation.gradBias = nil
operation.gradInput = nil
operation.gradWeight = nil

- -- No need to explicitly save the weight and bias because it is saved with the operation
- torch.save(paths.concat(output_dir,'input'), input)
- torch.save(paths.concat(output_dir,'operation'), operation)
- torch.save(paths.concat(output_dir,'output'), output)
+ boof.save(output_dir,input,operation,output)

4 changes: 1 addition & 3 deletions modules/io/src/test/torch7/GenerateDropout.lua
@@ -20,9 +20,7 @@ local function generate( variant , data_type, v1)
operation:evaluate()
local output = operation:forward(input)

- torch.save(paths.concat(output_dir,'input'), input)
- torch.save(paths.concat(output_dir,'operation'), operation)
- torch.save(paths.concat(output_dir,'output'), output)
+ boof.save(output_dir,input,operation,output)
end

for k,data_type in pairs(boof.float_types) do
5 changes: 1 addition & 4 deletions modules/io/src/test/torch7/GenerateLinear.lua
@@ -34,8 +34,5 @@ for k,data_type in pairs(boof.float_types) do
operation.gradInput = nil
operation.gradWeight = nil

- -- No need to explicitly save the weight and bias because it is saved with the operation
- torch.save(paths.concat(output_dir,'input'), input)
- torch.save(paths.concat(output_dir,'operation'), operation)
- torch.save(paths.concat(output_dir,'output'), output)
+ boof.save(output_dir,input,operation,output)
end
4 changes: 1 addition & 3 deletions modules/io/src/test/torch7/GenerateReLU.lua
@@ -24,7 +24,5 @@ for k,data_type in pairs(boof.float_types) do
operation:evaluate()
local output = operation:forward(input)

- torch.save(paths.concat(output_dir,'input'), input)
- torch.save(paths.concat(output_dir,'operation'), operation)
- torch.save(paths.concat(output_dir,'output'), output)
+ boof.save(output_dir,input,operation,output)
end
10 changes: 2 additions & 8 deletions modules/io/src/test/torch7/GenerateSequential.lua
@@ -47,10 +47,7 @@ local function generateLinearLinear( variant , data_type)
prune(operation2)
sequence.output = nul

- -- No need to explicitly save the weight and bias because it is saved with the operation
- torch.save(paths.concat(output_dir,'input'), input)
- torch.save(paths.concat(output_dir,'operation'), sequence)
- torch.save(paths.concat(output_dir,'output'), output)
+ boof.save(output_dir,input,sequence,output)
end

------------------------------------------------------------------------------------------------------
@@ -80,10 +77,7 @@ local function generateViewLinear( variant , data_type)
prune(operation2)
sequence.output = nul

- -- No need to explicitly save the weight and bias because it is saved with the operation
- torch.save(paths.concat(output_dir,'input'), input)
- torch.save(paths.concat(output_dir,'operation'), sequence)
- torch.save(paths.concat(output_dir,'output'), output)
+ boof.save(output_dir,input,sequence,output)
end

for k,data_type in pairs(boof.float_types) do
4 changes: 1 addition & 3 deletions modules/io/src/test/torch7/GenerateSigmoid.lua
@@ -24,7 +24,5 @@ for k,data_type in pairs(boof.float_types) do
operation:evaluate()
local output = operation:forward(input)

- torch.save(paths.concat(output_dir,'input'), input)
- torch.save(paths.concat(output_dir,'operation'), operation)
- torch.save(paths.concat(output_dir,'output'), output)
+ boof.save(output_dir,input,operation,output)
end
10 changes: 2 additions & 8 deletions modules/io/src/test/torch7/GenerateSpatialBatchNorm.lua
@@ -32,10 +32,7 @@ for k,data_type in pairs(boof.float_types) do
operation:evaluate()
local output = operation:forward(input)

- -- No need to explicitly save the weight and bias because it is saved with the operation
- torch.save(paths.concat(output_dir,'input'), input)
- torch.save(paths.concat(output_dir,'operation'), operation)
- torch.save(paths.concat(output_dir,'output'), output)
+ boof.save(output_dir,input,operation,output)

------------------------------------------------------------------------
-- The same but with gamma+beta (a.k.a. weight and bias)
@@ -51,8 +48,5 @@ for k,data_type in pairs(boof.float_types) do
operation:evaluate()
output = operation:forward(input)

- -- No need to explicitly save the weight and bias because it is saved with the operation
- torch.save(paths.concat(output_dir,'input'), input)
- torch.save(paths.concat(output_dir,'operation'), operation)
- torch.save(paths.concat(output_dir,'output'), output)
+ boof.save(output_dir,input,operation,output)
end
5 changes: 1 addition & 4 deletions modules/io/src/test/torch7/GenerateSpatialConvolution.lua
@@ -45,10 +45,7 @@ local function generate( variant , data_type)
operation.gradInput = nil
operation.gradWeight = nil

- -- No need to explicitly save the weight and bias because it is saved with the operation
- torch.save(paths.concat(output_dir,'input'), input)
- torch.save(paths.concat(output_dir,'operation'), operation)
- torch.save(paths.concat(output_dir,'output'), output)
+ boof.save(output_dir,input,operation,output)

end

4 changes: 1 addition & 3 deletions modules/io/src/test/torch7/GenerateSpatialDropout.lua
@@ -20,9 +20,7 @@ local function generate( variant , data_type, v1)
operation:evaluate()
local output = operation:forward(input)

- torch.save(paths.concat(output_dir,'input'), input)
- torch.save(paths.concat(output_dir,'operation'), operation)
- torch.save(paths.concat(output_dir,'output'), output)
+ boof.save(output_dir,input,operation,output)
end

for k,data_type in pairs(boof.float_types) do
4 changes: 1 addition & 3 deletions modules/io/src/test/torch7/GenerateSpatialMaxPooling.lua
@@ -27,9 +27,7 @@ local function generate( variant , data_type)
operation:evaluate()
local output = operation:forward(input)

- torch.save(paths.concat(output_dir,'input'), input)
- torch.save(paths.concat(output_dir,'operation'), operation)
- torch.save(paths.concat(output_dir,'output'), output)
+ boof.save(output_dir,input,operation,output)

end

4 changes: 1 addition & 3 deletions modules/io/src/test/torch7/GenerateTanH.lua
@@ -26,7 +26,5 @@ for k,data_type in pairs(boof.float_types) do
operation:evaluate()
local output = operation:forward(input)

- torch.save(paths.concat(output_dir,'input'), input)
- torch.save(paths.concat(output_dir,'operation'), operation)
- torch.save(paths.concat(output_dir,'output'), output)
+ boof.save(output_dir,input,operation,output)
end
10 changes: 10 additions & 0 deletions modules/io/src/test/torch7/boof.lua
@@ -33,3 +33,13 @@ function boof.create_output(operation_name, data_type, variant)

return output_dir
end

+ function boof.save( output_dir, input, operation, output )
+     torch.save(paths.concat(output_dir,'input'), input)
+     torch.save(paths.concat(output_dir,'operation'), operation)
+     torch.save(paths.concat(output_dir,'output'), output)

+     torch.save(paths.concat(output_dir,'input_ascii'), input, 'ascii')
+     torch.save(paths.concat(output_dir,'operation_ascii'), operation, 'ascii')
+     torch.save(paths.concat(output_dir,'output_ascii'), output, 'ascii')
+ end
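The new helper writes every artifact twice: once in Torch's default binary serialization and once with the `'ascii'` format flag, producing the `*_ascii` siblings that the Java tests above read via `readAscii()`. A hypothetical round-trip check over one such directory (the path and the `TorchObject` import location are illustrative, not from this commit):

```java
import java.io.File;
import java.io.IOException;
import java.util.List;

import deepboof.io.torch7.ParseAsciiTorch7;
import deepboof.io.torch7.ParseBinaryTorch7;
// TorchObject's exact subpackage is an assumption:
import deepboof.io.torch7.struct.TorchObject;

// Parses a binary file and the *_ascii sibling written by boof.save() and
// verifies each decodes to exactly one Torch object, as the tests expect.
public class RoundTripCheck {
    public static void check(File dir) throws IOException {
        List<TorchObject> binary = new ParseBinaryTorch7().parse(new File(dir, "input"));
        List<TorchObject> ascii  = new ParseAsciiTorch7().parse(new File(dir, "input_ascii"));
        if (binary.size() != 1 || ascii.size() != 1)
            throw new IOException("Expected exactly one object per file");
        // Structural comparison of the two parsed objects is left to the
        // test harness (DeepUnitTest.assertEquals in the diff above).
    }
}
```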
