II. Caffe2: saving/loading a model and retraining
1. About the Caffe2 examples
First, here is the command to download a model, using AlexNet as the example (it is also on the official site). Whether you later follow the official examples or fine-tune your own network, you will probably need these models. The download can be slow, so retry a few times if needed:
python -m caffe2.python.models.download bvlc_alexnet
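The download places init_net.pb and predict_net.pb in a folder named after the model in the current directory. As a quick sanity check you can parse one of the files; this is just a minimal sketch, assuming that default ./bvlc_alexnet/ location (adjust the path if the model was downloaded somewhere else):

from caffe2.proto import caffe2_pb2

# quick sanity check on the downloaded model (path assumed to be ./bvlc_alexnet/)
predict_def = caffe2_pb2.NetDef()
with open('bvlc_alexnet/predict_net.pb', 'rb') as f:
    predict_def.ParseFromString(f.read())
print(predict_def.name, len(predict_def.op))  # model name and number of operators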
Most examples can be run simply by following the official tutorials, so I will only comment on one of them here:
Models and Datasets
This is the official tutorial that directly runs a squeezenet model. One thing to watch out for: do not actually use squeezenet, or you will hit the following error:
Segmentation fault (core dumped)
Using bvlc_alexnet (or another model) works fine and does not raise the error. The explanation given in the GitHub issue is shown below; evidently this is a bug that has not been fixed yet:
2. Building a simple model
First, following the official MNIST example, build a trainable model. A simple example is given below, which also shows how convenient it is to build a network in Caffe2:
import os
import numpy as np
import matplotlib.pyplot as plt
from caffe2.python import brew, core, model_helper, workspace

# 1. Data preparation
def AddInput(model, batch_size, db, db_type):
    data_uint8, label = model.TensorProtosDBInput(
        [], ["data_uint8", "label"],
        batch_size=batch_size, db=db, db_type=db_type)
    # cast the data to float: 256 -> 256.0 -> [0, 1]
    data = model.Cast(data_uint8, "data", to=core.DataType.FLOAT)
    data = model.Scale(data, data, scale=float(1.0 / 256))
    data = model.StopGradient(data, data)
    return data, label

# 2. define model, main computation
def AddLeNetModel(model, data):
    conv1 = brew.conv(model, data, 'conv1', dim_in=1, dim_out=20, kernel=5)  # 28x28 -> 24x24
    pool1 = brew.max_pool(model, conv1, 'pool1', kernel=2, stride=2)         # 24x24 -> 12x12
    conv2 = brew.conv(model, pool1, 'conv2', 20, 100, kernel=5)              # 12x12 -> 8x8
    pool2 = brew.max_pool(model, conv2, 'pool2', kernel=2, stride=2)         # 8x8 -> 4x4
    fc3 = brew.fc(model, pool2, 'fc3', dim_in=100 * 4 * 4, dim_out=500)
    relu = brew.relu(model, fc3, 'fc3')
    fc10 = brew.fc(model, relu, 'fc10', 500, 10)
    softmax = brew.softmax(model, fc10, 'softmax')
    return softmax

def AddAccuracy(model, softmax, label):
    acc = brew.accuracy(model, [softmax, label], 'acc')
    return acc

# 3. train, update gradient
def AddTrainOp(model, softmax, label):
    xent = model.LabelCrossEntropy([softmax, label], 'xent')  # get cross entropy
    loss = model.AveragedLoss(xent, 'loss')
    AddAccuracy(model, softmax, label)    # track the accuracy
    model.AddGradientOperators([loss])    # key part, will calc grad with loss
    ITER = brew.iter(model, 'iter')       # number of iterations (model, blob_out)
    LR = model.LearningRate(ITER, 'LR', base_lr=-0.1, policy='step', stepsize=1, gamma=0.999)
    ONE = model.param_init_net.ConstantFill([], 'ONE', shape=[1], value=1.0)  # used in gradient update
    for param in model.params:
        param_grad = model.param_to_grad[param]
        model.WeightedSum([param, ONE, param_grad, LR], param)  # param += LR * param_grad (LR is negative)

# 4. print log
def AddBookkeepOp(model):
    model.Print('loss', [], to_file=0)
    model.Print('acc', [], to_file=0)

# main
root_folder = '/home/xxx/caffe2'
data_folder = os.path.join(root_folder, 'models', 'mnist')

# train pipeline
arg_scope = {'order': 'NCHW'}
train_model = model_helper.ModelHelper(name='mnist_train', arg_scope=arg_scope)
data, label = AddInput(train_model, batch_size=64,
                       db=os.path.join(data_folder, 'mnist-train-nchw-lmdb'),
                       db_type='lmdb')
softmax = AddLeNetModel(train_model, data)
AddTrainOp(train_model, softmax, label)
AddBookkeepOp(train_model)

# test pipeline
test_model = model_helper.ModelHelper(name='mnist_test', arg_scope=arg_scope, init_params=False)
data, label = AddInput(test_model, batch_size=100,
                       db=os.path.join(data_folder, 'mnist-test-nchw-lmdb'),
                       db_type='lmdb')
softmax = AddLeNetModel(test_model, data)
AddAccuracy(test_model, softmax, label)

# now start running
""" train """
workspace.RunNetOnce(train_model.param_init_net)
workspace.CreateNet(train_model.net, overwrite=True)
total_iters = 10
acc = np.zeros(total_iters)
loss = np.zeros(total_iters)
for i in xrange(total_iters):
    workspace.RunNet(train_model.net)
    print(workspace.FetchBlob("label").shape)  # sanity check: one batch of labels
    acc[i] = workspace.FetchBlob('acc')
    loss[i] = workspace.FetchBlob('loss')
plt.figure()
plt.plot(loss, 'b')
plt.plot(acc, 'r')
plt.legend(('Loss', 'Accuracy'), loc='upper right')

""" test """
workspace.RunNetOnce(test_model.param_init_net)
workspace.CreateNet(test_model.net, overwrite=True)
test_acc = np.zeros(100)
for i in xrange(100):
    workspace.RunNet(test_model.net)
    test_acc[i] = workspace.FetchBlob('acc')
plt.figure()
plt.plot(test_acc, 'r')
plt.legend(('Accuracy',), loc='lower right')
To demonstrate the retraining step later, I only trained for a few iterations here:
The result is as follows:
As you can see, at this point the loss is still large and the accuracy still low, because I only ran 10 iterations. Next, saving and loading the model.
3. Saving and loading the model (init_net.pb, predict_net.pb)
With the model trained, the next step is to save and load it. This generates two files in the current directory, init_net.pb and predict_net.pb:
from caffe2.proto import caffe2_pb2
from caffe2.python import core, utils, workspace

INIT_NET = './init_net.pb'
PREDICT_NET = './predict_net.pb'

def save_net(INIT_NET, PREDICT_NET, model):
    # write the model definition (the ops of model.net)
    with open(PREDICT_NET, 'wb') as f:
        f.write(model.net._net.SerializeToString())
    # build an init net that fills every parameter with its currently trained value
    init_net = caffe2_pb2.NetDef()
    for param in model.params:
        blob = workspace.FetchBlob(param)
        shape = blob.shape
        op = core.CreateOperator("GivenTensorFill", [], [param],
                                 arg=[utils.MakeArgument("shape", shape),
                                      utils.MakeArgument("values", blob)])
        init_net.op.extend([op])
    # a placeholder for the input blob, so the net can be instantiated without data
    init_net.op.extend([core.CreateOperator("ConstantFill", [],
                                            ["data"], shape=(1, 28, 28))])
    with open(INIT_NET, 'wb') as f:
        f.write(init_net.SerializeToString())

def load_net(INIT_NET, PREDICT_NET, device_opts):
    init_def = caffe2_pb2.NetDef()
    with open(INIT_NET, 'rb') as f:
        init_def.ParseFromString(f.read())
        init_def.device_option.CopyFrom(device_opts)
    net_def = caffe2_pb2.NetDef()
    with open(PREDICT_NET, 'rb') as f:
        net_def.ParseFromString(f.read())
        net_def.device_option.CopyFrom(device_opts)
    predict_net = core.Net(net_def)
    init_net = core.Net(init_def)
    return init_net, predict_net

device_opts = core.DeviceOption(caffe2_pb2.CPU, 0)  # use core.DeviceOption(caffe2_pb2.CUDA, 0) for GPU
save_net(INIT_NET, PREDICT_NET, train_model)  # save the model trained above
These two files store the model parameters and the model definition respectively. init_net.pb mainly holds the model's initialization parameters; printing it with net_printer shows that they are just the W and b of each layer:
predict_net.pb mainly holds the model definition:
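If you want to check this yourself, both files are ordinary NetDef protobufs and can simply be parsed and printed. A minimal sketch: every op in init_net.pb should be a GivenTensorFill for one W or b blob, while predict_net.pb lists the network's operators.

from caffe2.proto import caffe2_pb2

# dump the op types and output blob names of the two saved nets
for path in ('./init_net.pb', './predict_net.pb'):
    net_def = caffe2_pb2.NetDef()
    with open(path, 'rb') as f:
        net_def.ParseFromString(f.read())
    print(path)
    print([op.type for op in net_def.op])       # e.g. GivenTensorFill / Conv, MaxPool, ...
    print([op.output[0] for op in net_def.op])  # e.g. conv1_w, conv1_b, ...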
4. Loading the model and retraining
# reset the workspace so the retrained model really starts from the saved weights
workspace.ResetWorkspace('../')
retrain_model = model_helper.ModelHelper(name='mnist_retrain', arg_scope=arg_scope)
init_net, predict_net = load_net(INIT_NET, PREDICT_NET, device_opts)
retrain_model.param_init_net.AppendNet(init_net)
retrain_model.net.AppendNet(predict_net)
data, label = AddInput(retrain_model, batch_size=64,
                       db=os.path.join(data_folder, 'mnist-train-nchw-lmdb'),
                       db_type='lmdb')
# recreate the iteration counter, learning-rate schedule and the constant ONE,
# since their init ops are not stored in init_net.pb
ITER = brew.iter(retrain_model, 'iter')
ONE = retrain_model.param_init_net.ConstantFill([], 'ONE', shape=[1], value=1.0)
LR = retrain_model.LearningRate(ITER, 'LR', base_lr=-0.1, policy='step', stepsize=10, gamma=0.999)

workspace.RunNetOnce(retrain_model.param_init_net)
workspace.CreateNet(retrain_model.net, overwrite=True)
total_iters = 500
acc = np.zeros(total_iters)
loss = np.zeros(total_iters)
for i in xrange(total_iters):
    workspace.RunNet(retrain_model.net)
    acc[i] = workspace.FetchBlob('acc')
    loss[i] = workspace.FetchBlob('loss')
plt.figure()
plt.title('retrain')
plt.plot(loss, 'b')
plt.plot(acc, 'r')
plt.legend(('Loss', 'Accuracy'), loc='upper right')
As you can see, we have successfully loaded the previously saved model and retrained it:
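A simple way to convince yourself that the loaded weights really are the trained ones (and not fresh initializations) is to reset the workspace, run only the loaded init net and fetch a parameter blob. This is a minimal sketch; the blob name conv1_w follows the naming convention brew.conv uses for the conv1 layer defined above.

# verify that load_net restores the trained parameters
workspace.ResetWorkspace()
init_net, predict_net = load_net(INIT_NET, PREDICT_NET, device_opts)
workspace.RunNetOnce(init_net)      # fills conv1_w, conv1_b, ... from init_net.pb
w = workspace.FetchBlob('conv1_w')
print(w.shape, float(w.mean()))     # expected shape (20, 1, 5, 5)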
The next post may cover my experience deploying on an Android phone, or implementing a model from a paper, or some notes on fine-tuning...