Add local layer to dev by jackculpepper · Pull Request #5 · jackculpepper/caffe · GitHub
[go: up one dir, main page]
Skip to content

Add local layer to dev #5

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 9 commits into
base: dev
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
25 changes: 25 additions & 0 deletions examples/mnist/lenet_local_solver.prototxt
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
# The train/test net protocol buffer definition
net: "examples/mnist/lenet_local_train_test.prototxt"
# test_iter specifies how many forward passes the test should carry out.
# In the case of MNIST, we have test batch size 100 and 100 test iterations,
# covering the full 10,000 testing images.
test_iter: 100
# Carry out testing every 500 training iterations.
test_interval: 500
# The base learning rate, momentum and the weight decay of the network.
base_lr: 0.01
momentum: 0.9
weight_decay: 0.0005
# The learning rate policy
lr_policy: "inv"
gamma: 0.0001
power: 0.75
# Display every 100 iterations
display: 100
# The maximum number of iterations
max_iter: 10000
# snapshot intermediate results
snapshot: 5000
# Use a distinct prefix so snapshots of the locally-connected variant do not
# overwrite the standard LeNet snapshots written by lenet_solver.prototxt.
snapshot_prefix: "examples/mnist/lenet_local"
# solver mode: CPU or GPU
solver_mode: GPU
171 changes: 171 additions & 0 deletions examples/mnist/lenet_local_train_test.prototxt
Original file line number Diff line number Diff line change
@@ -0,0 +1,171 @@
name: "LeNet"
# Training data layer: MNIST training images from LMDB, scaled to [0, 1).
layers {
  name: "mnist"
  type: DATA
  top: "data"
  top: "label"
  data_param {
    source: "examples/mnist/mnist_train_lmdb"
    backend: LMDB
    batch_size: 64
  }
  transform_param {
    scale: 0.00390625
  }
  include: { phase: TRAIN }
}
# Test data layer: same source format, batch of 100 for the 100 test_iter
# passes configured in the solver (covers all 10,000 test images).
layers {
  name: "mnist"
  type: DATA
  top: "data"
  top: "label"
  data_param {
    source: "examples/mnist/mnist_test_lmdb"
    backend: LMDB
    batch_size: 100
  }
  transform_param {
    scale: 0.00390625
  }
  include: { phase: TEST }
}

layers {
  name: "conv1"
  type: CONVOLUTION
  bottom: "data"
  top: "conv1"
  blobs_lr: 1
  blobs_lr: 2
  convolution_param {
    num_output: 20
    kernel_size: 5
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layers {
  name: "pool1"
  type: POOLING
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
# Locally-connected layer (the new LOCAL type this PR adds): like a
# convolution but with untied weights per spatial location.
layers {
  name: "local1"
  type: LOCAL
  bottom: "pool1"
  top: "local1"
  blobs_lr: 1
  blobs_lr: 1
  local_param {
    num_output: 5
    kernel_size: 5
    stride: 1
    pad: 0
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layers {
  name: "relu1"
  type: RELU
  bottom: "local1"
  top: "local1"
}
layers {
  name: "local2"
  type: LOCAL
  bottom: "local1"
  top: "local2"
  blobs_lr: 1
  blobs_lr: 1
  local_param {
    num_output: 10
    kernel_size: 5
    stride: 1
    pad: 0
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layers {
  name: "relu2"
  type: RELU
  bottom: "local2"
  top: "local2"
}
layers {
  name: "ip1"
  type: INNER_PRODUCT
  bottom: "local2"
  top: "ip1"
  blobs_lr: 1
  blobs_lr: 2
  inner_product_param {
    num_output: 500
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
# Renamed from the duplicate "relu2" so every layer has a unique name.
layers {
  name: "relu3"
  type: RELU
  bottom: "ip1"
  top: "ip1"
}
layers {
  name: "ip2"
  type: INNER_PRODUCT
  bottom: "ip1"
  top: "ip2"
  blobs_lr: 1
  blobs_lr: 2
  inner_product_param {
    num_output: 10
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
    }
  }
}
layers {
  name: "accuracy"
  type: ACCURACY
  bottom: "ip2"
  bottom: "label"
  top: "accuracy"
  include: { phase: TEST }
}
layers {
  name: "loss"
  type: SOFTMAX_LOSS
  bottom: "ip2"
  bottom: "label"
  top: "loss"
}
2 changes: 1 addition & 1 deletion examples/mnist/train_lenet.sh
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
#!/usr/bin/env sh

./build/tools/caffe train --solver=examples/mnist/lenet_solver.prototxt
GLOG_logtostderr=0 GLOG_log_dir=examples/mnist/ ./build/tools/caffe train --solver=examples/mnist/lenet_solver.prototxt
3 changes: 3 additions & 0 deletions examples/mnist/train_lenet_local.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
#!/usr/bin/env sh
# Train the locally-connected LeNet variant on MNIST.
# GLOG_logtostderr=0 with GLOG_log_dir sends glog output to log files
# under examples/mnist/ instead of stderr.
# NOTE(review): --gpu=1 assumes a second GPU (device index 1) exists — confirm.

GLOG_logtostderr=0 GLOG_log_dir=examples/mnist/ ./build/tools/caffe train --solver=examples/mnist/lenet_local_solver.prototxt --gpu=1
25 changes: 25 additions & 0 deletions examples/siamese/mnist_siamese_local_solver.prototxt
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
# The train/test net protocol buffer definition
net: "examples/siamese/mnist_siamese_local_train_test.prototxt"
# test_iter specifies how many forward passes the test should carry out.
# In the case of MNIST, we have test batch size 100 and 100 test iterations,
# covering the full 10,000 testing images.
test_iter: 100
# Carry out testing every 500 training iterations.
test_interval: 500
# The base learning rate, momentum and the weight decay of the network.
base_lr: 0.01
momentum: 0.9
weight_decay: 0.0000
# The learning rate policy
lr_policy: "inv"
gamma: 0.0001
power: 0.75
# Display every 100 iterations
display: 100
# The maximum number of iterations
max_iter: 50000
# snapshot intermediate results
snapshot: 5000
# Use a distinct prefix so snapshots of the locally-connected variant do not
# overwrite the standard siamese snapshots written by mnist_siamese_solver.prototxt.
snapshot_prefix: "examples/siamese/mnist_siamese_local"
# solver mode: CPU or GPU
solver_mode: GPU
Loading
0