
Commit 218ca15

jeffdonahue authored and shelhamer committed
cpplint.py -> cpp_lint.py and NOLINT_NEXTLINE -> NOLINT_NEXT_LINE

1 parent 41246fc · commit 218ca15

15 files changed · 37 additions & 37 deletions

Makefile

Lines changed: 1 addition & 1 deletion

@@ -121,7 +121,7 @@ linecount: clean
 	cloc --read-lang-def=$(PROJECT).cloc src/$(PROJECT)/

 lint:
-	./scripts/cpplint.py $(NONGEN_CXX_SRCS)
+	./scripts/cpp_lint.py $(NONGEN_CXX_SRCS)

 test: init $(TEST_BINS)
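The lint entry point is unchanged by this commit: make lint still runs the linter over every non-generated C++ source in $(NONGEN_CXX_SRCS); only the script it invokes is renamed from scripts/cpplint.py to scripts/cpp_lint.py.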
scripts/cpp_lint.py (renamed from scripts/cpplint.py)

Lines changed: 3 additions & 3 deletions

@@ -437,8 +437,8 @@

 _regexp_compile_cache = {}

-# Finds occurrences of NOLINT[_NEXTLINE] or NOLINT[_NEXTLINE](...).
-_RE_SUPPRESSION = re.compile(r'\bNOLINT(_NEXTLINE)?\b(\([^)]*\))?')
+# Finds occurrences of NOLINT[_NEXT_LINE] or NOLINT[_NEXT_LINE](...).
+_RE_SUPPRESSION = re.compile(r'\bNOLINT(_NEXT_LINE)?\b(\([^)]*\))?')

 # {str, set(int)}: a map from error categories to sets of linenumbers
 # on which those errors are expected and should be suppressed.

@@ -472,7 +472,7 @@ def ParseNolintSuppressions(filename, raw_line, linenum, error):
   # FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*).
   matched = _RE_SUPPRESSION.search(raw_line)
   if matched:
-    if matched.group(1) == '_NEXTLINE':
+    if matched.group(1) == '_NEXT_LINE':
       linenum += 1
     category = matched.group(2)
     if category in (None, '(*)'):  # => "suppress all"
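The only behavioral change in the lint script is the spelling of the suppression marker. A minimal sketch of what the renamed regex matches, with _RE_SUPPRESSION copied verbatim from the hunk above and the sample comment lines being hypothetical:

import re

_RE_SUPPRESSION = re.compile(r'\bNOLINT(_NEXT_LINE)?\b(\([^)]*\))?')

for raw_line in [
    'foo();  // NOLINT',                              # bare marker: suppress everything on this line
    'x = rand();  // NOLINT(runtime/threadsafe_fn)',  # one category, this line
    '// NOLINT_NEXT_LINE(whitespace/operators)',      # one category, next line
]:
    m = _RE_SUPPRESSION.search(raw_line)
    # group(1) distinguishes same-line from next-line suppression;
    # group(2) carries the '(category)' part, or None for "suppress all".
    print(m.group(1), m.group(2))

# Prints:
#   None None
#   None (runtime/threadsafe_fn)
#   _NEXT_LINE (whitespace/operators)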

src/caffe/layers/bnll_layer.cu

Lines changed: 2 additions & 2 deletions

@@ -59,7 +59,7 @@ void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
   const Dtype* bottom_data = bottom[0]->gpu_data();
   Dtype* top_data = (*top)[0]->mutable_gpu_data();
   const int count = bottom[0]->count();
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   BNLLForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
       count, bottom_data, top_data);
   CUDA_POST_KERNEL_CHECK;

@@ -84,7 +84,7 @@ Dtype BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
   const Dtype* top_diff = top[0]->gpu_diff();
   Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
   const int count = (*bottom)[0]->count();
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   BNLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
       count, top_diff, bottom_data, bottom_diff);
   CUDA_POST_KERNEL_CHECK;
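All of the whitespace/operators suppressions in this and the following .cu files guard CUDA kernel launches: cpplint does not understand the <<<blocks, threads>>> launch syntax and reports those lines as misspaced operators, so each launch is preceded by a NOLINT_NEXT_LINE(whitespace/operators) comment that suppresses exactly that category on only the line that follows.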

src/caffe/layers/data_layer.cpp

Lines changed: 4 additions & 4 deletions

@@ -50,15 +50,15 @@ void* DataLayerPrefetch(void* layer_pointer) {
   int h_off, w_off;
   // We only do random crop when we do training.
   if (Caffe::phase() == Caffe::TRAIN) {
-    // NOLINT_NEXTLINE(runtime/threadsafe_fn)
+    // NOLINT_NEXT_LINE(runtime/threadsafe_fn)
     h_off = rand() % (height - cropsize);
-    // NOLINT_NEXTLINE(runtime/threadsafe_fn)
+    // NOLINT_NEXT_LINE(runtime/threadsafe_fn)
     w_off = rand() % (width - cropsize);
   } else {
     h_off = (height - cropsize) / 2;
     w_off = (width - cropsize) / 2;
   }
-  // NOLINT_NEXTLINE(runtime/threadsafe_fn)
+  // NOLINT_NEXT_LINE(runtime/threadsafe_fn)
   if (mirror && rand() % 2) {
     // Copy mirrored version
     for (int c = 0; c < channels; ++c) {

@@ -143,7 +143,7 @@ void DataLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
   iter_->SeekToFirst();
   // Check if we would need to randomly skip a few data points
   if (this->layer_param_.rand_skip()) {
-    // NOLINT_NEXTLINE(runtime/threadsafe_fn)
+    // NOLINT_NEXT_LINE(runtime/threadsafe_fn)
     unsigned int skip = rand() % this->layer_param_.rand_skip();
     LOG(INFO) << "Skipping first " << skip << " data points.";
     while (skip-- > 0) {
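The runtime/threadsafe_fn suppressions quiet cpplint's warning that rand() is not thread-safe (the linter suggests rand_r() instead), which Caffe accepts in this prefetch path. A rough sketch, not the actual cpp_lint.py code, of how these NOLINT_NEXT_LINE comments end up in the {str: set(int)} suppression map described earlier in the script:

import re
from collections import defaultdict

_RE_SUPPRESSION = re.compile(r'\bNOLINT(_NEXT_LINE)?\b(\([^)]*\))?')

def parse_nolint_suppressions(lines):
    # Maps an error category (None means "suppress all") to the set of
    # 1-based line numbers on which that category is suppressed.
    suppressions = defaultdict(set)
    for linenum, raw_line in enumerate(lines, start=1):
        matched = _RE_SUPPRESSION.search(raw_line)
        if not matched:
            continue
        if matched.group(1) == '_NEXT_LINE':
            linenum += 1  # the suppression applies to the following line
        category = matched.group(2)
        if category in (None, '(*)'):
            suppressions[None].add(linenum)
        else:
            suppressions[category[1:-1]].add(linenum)  # strip the parens
    return suppressions

# Hypothetical two-line excerpt mirroring the hunk above:
src = [
    '    // NOLINT_NEXT_LINE(runtime/threadsafe_fn)',
    '    h_off = rand() % (height - cropsize);',
]
print(dict(parse_nolint_suppressions(src)))  # {'runtime/threadsafe_fn': {2}}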

src/caffe/layers/dropout_layer.cu

Lines changed: 2 additions & 2 deletions

@@ -82,7 +82,7 @@ void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
   CURAND_CHECK(curandGenerate(Caffe::curand_generator(),
       (unsigned int*)(rand_vec_->mutable_gpu_data()), count));
   // set thresholds
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   DropoutForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
       count, bottom_data, (unsigned int*)rand_vec_->gpu_data(), uint_thres_,
       scale_, top_data);

@@ -113,7 +113,7 @@ Dtype DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
   Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
   const unsigned int* mask = (unsigned int*)rand_vec_->gpu_data();
   const int count = (*bottom)[0]->count();
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   DropoutBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
       count, top_diff, mask, uint_thres_, scale_, bottom_diff);
   CUDA_POST_KERNEL_CHECK;

src/caffe/layers/lrn_layer.cu

Lines changed: 3 additions & 3 deletions

@@ -76,13 +76,13 @@ void LRNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
   // We will launch one kernel for each pixel location, and have the kernel
   // go through all the channels.
   int n_threads = num_ * height_ * width_;
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   LRNFillScale<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>(
       n_threads, bottom_data, num_, channels_, height_, width_, size_,
       alpha_ / size_, scale_data);
   CUDA_POST_KERNEL_CHECK;
   n_threads = bottom[0]->count();
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   LRNComputeOutput<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>(
       n_threads, bottom_data, scale_data, -beta_, top_data);
   CUDA_POST_KERNEL_CHECK;

@@ -155,7 +155,7 @@ template <typename Dtype>
 Dtype LRNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
     const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
   int n_threads = num_ * height_ * width_;
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   LRNComputeDiff<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>(
       n_threads, (*bottom)[0]->gpu_data(), top[0]->gpu_data(),
       scale_.gpu_data(), top[0]->gpu_diff(), num_, channels_, height_, width_,

src/caffe/layers/pooling_layer.cu

Lines changed: 7 additions & 7 deletions

@@ -146,14 +146,14 @@ void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
   int count = (*top)[0]->count();
   switch (this->layer_param_.pool()) {
   case LayerParameter_PoolMethod_MAX:
-    // NOLINT_NEXTLINE(whitespace/operators)
+    // NOLINT_NEXT_LINE(whitespace/operators)
     MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, bottom_data, bottom[0]->num(), CHANNELS_,
         HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_,
         top_data);
     break;
   case LayerParameter_PoolMethod_AVE:
-    // NOLINT_NEXTLINE(whitespace/operators)
+    // NOLINT_NEXT_LINE(whitespace/operators)
     AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, bottom_data, bottom[0]->num(), CHANNELS_,
         HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_,

@@ -164,14 +164,14 @@ void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
       // We need to create the random index as well.
       CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(),
           rand_idx_.mutable_gpu_data(), count));
-      // NOLINT_NEXTLINE(whitespace/operators)
+      // NOLINT_NEXT_LINE(whitespace/operators)
      StoPoolForwardTrain<Dtype><<<CAFFE_GET_BLOCKS(count),
                                    CAFFE_CUDA_NUM_THREADS>>>(
           count, bottom_data, bottom[0]->num(), CHANNELS_,
           HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_,
           rand_idx_.mutable_gpu_data(), top_data);
     } else {
-      // NOLINT_NEXTLINE(whitespace/operators)
+      // NOLINT_NEXT_LINE(whitespace/operators)
      StoPoolForwardTest<Dtype><<<CAFFE_GET_BLOCKS(count),
                                   CAFFE_CUDA_NUM_THREADS>>>(
           count, bottom_data, bottom[0]->num(), CHANNELS_,

@@ -294,21 +294,21 @@ Dtype PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
   int count = (*bottom)[0]->count();
   switch (this->layer_param_.pool()) {
   case LayerParameter_PoolMethod_MAX:
-    // NOLINT_NEXTLINE(whitespace/operators)
+    // NOLINT_NEXT_LINE(whitespace/operators)
     MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, (*bottom)[0]->gpu_data(), top[0]->gpu_data(), top_diff,
         top[0]->num(), CHANNELS_, HEIGHT_, WIDTH_, POOLED_HEIGHT_,
         POOLED_WIDTH_, KSIZE_, STRIDE_, bottom_diff);
     break;
   case LayerParameter_PoolMethod_AVE:
-    // NOLINT_NEXTLINE(whitespace/operators)
+    // NOLINT_NEXT_LINE(whitespace/operators)
     AvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, top_diff, top[0]->num(), CHANNELS_,
         HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_,
         bottom_diff);
     break;
   case LayerParameter_PoolMethod_STOCHASTIC:
-    // NOLINT_NEXTLINE(whitespace/operators)
+    // NOLINT_NEXT_LINE(whitespace/operators)
     StoPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, rand_idx_.gpu_data(), top_diff,
         top[0]->num(), CHANNELS_, HEIGHT_, WIDTH_, POOLED_HEIGHT_,

src/caffe/layers/relu_layer.cu

Lines changed: 2 additions & 2 deletions

@@ -51,7 +51,7 @@ void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
   const Dtype* bottom_data = bottom[0]->gpu_data();
   Dtype* top_data = (*top)[0]->mutable_gpu_data();
   const int count = bottom[0]->count();
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   ReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
       count, bottom_data, top_data);
   CUDA_POST_KERNEL_CHECK;

@@ -80,7 +80,7 @@ Dtype ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
   const Dtype* top_diff = top[0]->gpu_diff();
   Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
   const int count = (*bottom)[0]->count();
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   ReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
       count, top_diff, bottom_data, bottom_diff);
   CUDA_POST_KERNEL_CHECK;

src/caffe/layers/sigmoid_layer.cu

Lines changed: 2 additions & 2 deletions

@@ -65,7 +65,7 @@ void SigmoidLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
   const Dtype* bottom_data = bottom[0]->gpu_data();
   Dtype* top_data = (*top)[0]->mutable_gpu_data();
   const int count = bottom[0]->count();
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   SigmoidForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
       count, bottom_data, top_data);
   CUDA_POST_KERNEL_CHECK;

@@ -95,7 +95,7 @@ Dtype SigmoidLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
   const Dtype* top_diff = top[0]->gpu_diff();
   Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
   const int count = (*bottom)[0]->count();
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   SigmoidBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
       count, top_diff, bottom_data, bottom_diff);
   CUDA_POST_KERNEL_CHECK;

src/caffe/layers/softmax_layer.cu

Lines changed: 3 additions & 3 deletions

@@ -105,21 +105,21 @@ void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
   // we need to subtract the max to avoid numerical issues, compute the exp,
   // and then normalize.
   // Compute max
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   kernel_get_max<Dtype><<<CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS>>>(
       num, dim, bottom_data, scale_data);
   // subtraction
   caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, -1.,
       scale_data, sum_multiplier_.gpu_data(), 1., top_data);
   // Perform exponentiation
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   kernel_exp<Dtype><<<CAFFE_GET_BLOCKS(num * dim), CAFFE_CUDA_NUM_THREADS>>>(
       num * dim, top_data, top_data);
   // sum after exp
   caffe_gpu_gemv<Dtype>(CblasNoTrans, num, dim, 1., top_data,
       sum_multiplier_.gpu_data(), 0., scale_data);
   // Do division
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   kernel_softmax_div<Dtype><<<CAFFE_GET_BLOCKS(num * dim),
       CAFFE_CUDA_NUM_THREADS>>>(
       num, dim, scale_data, top_data);
