I found another way.
You can solve this problem by returning the proto as a string.
The idea is to serialize the layers that will later be replaced (in my case, the first data layer) before overwriting them, and then concatenate the two proto strings at the end.
import caffe
from caffe import layers as L, params as P

def lenet(path_to_lmdb_train, path_to_lmdb_test,
          batch_size_train, batch_size_test):
    n = caffe.NetSpec()
    # TRAIN-phase data layer; serialize it now, before it is overwritten below
    n.data, n.label = L.Data(batch_size=batch_size_train, backend=P.Data.LMDB,
                             source=path_to_lmdb_train,
                             include=dict(phase=caffe.TRAIN),
                             transform_param=dict(scale=1. / 255), ntop=2)
    first_layer = str(n.to_proto())
    # TEST-phase data layer replaces the TRAIN one in the NetSpec
    n.data, n.label = L.Data(batch_size=batch_size_test, backend=P.Data.LMDB,
                             source=path_to_lmdb_test,
                             include=dict(phase=caffe.TEST),
                             transform_param=dict(scale=1. / 255), ntop=2)
    n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=20,
                            weight_filler=dict(type='xavier'))
    n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=50,
                            weight_filler=dict(type='xavier'))
    n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.ip1 = L.InnerProduct(n.pool2, num_output=500,
                           weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.ip1, in_place=True)
    n.ip2 = L.InnerProduct(n.relu1, num_output=10,
                           weight_filler=dict(type='xavier'))
    n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
    n.accuracy = L.Accuracy(n.ip2, n.label, include=dict(phase=caffe.TEST))
    # Prepend the serialized TRAIN data layer so both phases end up in the proto
    return first_layer + str(n.to_proto())
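To use the result, write the returned string to a .prototxt file and point your solver at it. A minimal sketch; the LMDB paths and file name here are placeholders for your own setup:

# Usage sketch; 'mnist_train_lmdb', 'mnist_test_lmdb' and the output
# file name are assumed placeholders, not part of the original answer.
net_proto = lenet('mnist_train_lmdb', 'mnist_test_lmdb',
                  batch_size_train=64, batch_size_test=100)
with open('lenet_train_test.prototxt', 'w') as f:
    f.write(net_proto)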