Sunday, June 26, 2016

Lasagne/Theano MNIST example issue

I'm trying to slightly modify the MNIST example code from GitHub here so that it reads a simpler, two-dimensional dataset. My toy dataset has the following structure:

x-coordinate, y-coordinate, class

Some example data points are

1,1
3,1
4,1
4,2
6,2
1,3

and their corresponding classes

0
1 
1
1
1
0

I'm able to read the data and create my custom MLP. However, when I try to run the training step, I get the following error:

(5, 2)
(5,)
Traceback (most recent call last):
  File "./t.py", line 78, in <module>
  train_err += train_fn(inputs, targets)
  File "/usr/local/lib/python2.7/dist-packages/theano/compile/function_module.py", line 786, in __call__
allow_downcast=s.allow_downcast)
  File "/usr/local/lib/python2.7/dist-packages/theano/tensor/type.py", line 177, in filter
data.shape))
TypeError: ('Bad input argument to theano function with name "./t.py:67"  at index 0(0-based)', 'Wrong number of dimensions: expected 4, got 2 with shape (5, 2).')

This clearly has something to do with the shapes of the arrays I'm passing in. What I can't figure out is why my case is any different from the MNIST dataset, which is also a two-dimensional array (one image per row).
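
For comparison, if I read the MNIST example correctly, its loader reshapes the flat image rows into a 4D array before anything is fed to the network, so each minibatch arrives as (batchsize, 1, 28, 28) and matches T.tensor4. A rough sketch of that step (from memory, with dummy data standing in for the real pixels):

import numpy as np

# stand-in for the flattened MNIST pixels: 5 images of 784 values each
data = np.zeros((5, 784), dtype='float32')

# each 784-value row becomes a (1, 28, 28) image, giving a 4D array
data = data.reshape(-1, 1, 28, 28)
print(data.shape)  # (5, 1, 28, 28)

My (5, 2) batches never go through any such reshape, which is the part I don't understand: the MNIST data starts out just as two-dimensional as mine.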

My entire code is the following:

import time

import numpy as np
import theano
import theano.tensor as T
import lasagne
from sklearn.cross_validation import train_test_split

def build_mlp(input_var=None):
    l_in = lasagne.layers.InputLayer(shape=(None, 1, 1, 2), input_var=input_var)
    l_h1 = lasagne.layers.DropoutLayer(l_in, p=0.2)
    l_hid1 = lasagne.layers.DenseLayer(
        l_h1, num_units=10,
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform())
    l_h2 = lasagne.layers.DropoutLayer(l_hid1, p=0.2)
    l_hid2 = lasagne.layers.DenseLayer(
        l_h2, num_units=10,
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.GlorotUniform())
    l_out = lasagne.layers.DenseLayer(
        l_hid2, num_units=5,
        nonlinearity=lasagne.nonlinearities.softmax,
        W=lasagne.init.GlorotUniform())
    return l_out

def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    assert len(inputs) == len(targets)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batchsize]
        else:
            excerpt = slice(start_idx, start_idx + batchsize)
        yield inputs[excerpt], targets[excerpt]


x_data = np.genfromtxt('a.csv', delimiter=',')
y_data = np.genfromtxt('b.csv', delimiter=',')

x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.33)

input_var = T.tensor4('inputs')
target_var = T.ivector('targets')

network = build_mlp(input_var)

prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()

params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.01, momentum=0.4)

test_prediction = lasagne.layers.get_output(network, deterministic=True)
test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
                                                        target_var)
test_loss = test_loss.mean()

test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
                  dtype=theano.config.floatX)

train_fn = theano.function([input_var, target_var], loss, updates=updates)
val_fn = theano.function([input_var, target_var], [test_loss, test_acc])

num_epochs = 100
for epoch in range(num_epochs):
    train_err = 0
    train_batches = 0
    start_time = time.time()
    for batch in iterate_minibatches(x_train, y_train, 5, shuffle=True):
        inputs, targets = batch
        print inputs.shape
        print targets.shape
        train_err += train_fn(inputs, targets)
        train_batches += 1

    val_err = 0
    val_acc = 0
    val_batches = 0
    for batch in iterate_minibatches(x_train, y_train, 5, shuffle=False):
        inputs, targets = batch
        err, acc = val_fn(inputs, targets)
        val_err += err
        val_acc += acc
        val_batches += 1

    print("Epoch {} of {} took {:.3f}s".format(epoch + 1, num_epochs, time.time() - start_time))
    print("  training loss:\t\t{:.6f}".format(train_err / train_batches))
    print("  validation loss:\t\t{:.6f}".format(val_err / val_batches))
    print("  validation accuracy:\t\t{:.2f} %".format(val_acc / val_batches * 100))

Could someone point out what I'm doing wrong here, please?
