Skip to content

Commit 1db0258

Browse files
authored
Generic reshape (#213)
* Generic reshape
* Update contributors
* Update features list
1 parent 5cf14eb commit 1db0258

15 files changed

+59
-75
lines changed

CMakeLists.txt

+2-2
Original file line numberDiff line numberDiff line change
@@ -64,10 +64,10 @@ add_library(neural-fortran
6464
src/nf/nf_parallel.f90
6565
src/nf/nf_parallel_submodule.f90
6666
src/nf/nf_random.f90
67-
src/nf/nf_reshape_layer.f90
68-
src/nf/nf_reshape_layer_submodule.f90
6967
src/nf/nf_reshape2d_layer.f90
7068
src/nf/nf_reshape2d_layer_submodule.f90
69+
src/nf/nf_reshape3d_layer.f90
70+
src/nf/nf_reshape3d_layer_submodule.f90
7171
src/nf/nf_self_attention_layer.f90
7272
src/nf/io/nf_io_binary.f90
7373
src/nf/io/nf_io_binary_submodule.f90

README.md

+6-5
Original file line numberDiff line numberDiff line change
@@ -15,8 +15,8 @@ Read the paper [here](https://arxiv.org/abs/1902.06714).
1515

1616
## Features
1717

18-
* Training and inference of dense (fully connected) and convolutional neural
19-
networks
18+
* Training and inference of dense (fully connected), convolutional (1-d and 2-d),
19+
and transformer neural networks
2020
* Stochastic gradient descent optimizers: Classic, momentum, Nesterov momentum,
2121
RMSProp, Adagrad, Adam, AdamW
2222
* More than a dozen activation functions and their derivatives
@@ -41,9 +41,8 @@ Read the paper [here](https://arxiv.org/abs/1902.06714).
4141
| Linear (2-d) | `linear2d` | `input2d`, `layernorm`, `linear2d`, `self_attention` | 2 |||
4242
| Self-attention | `self_attention` | `input2d`, `layernorm`, `linear2d`, `self_attention` | 2 |||
4343
| Layer Normalization | `layernorm` | `linear2d`, `self_attention` | 2 |||
44-
| Flatten | `flatten` | `input2d`, `input3d`, `conv2d`, `maxpool2d`, `reshape` | 1 |||
45-
| Reshape (1-d to 2-d) | `reshape2d` | `input2d`, `conv1d`, `locally_connected1d`, `maxpool1d` | 2 |||
46-
| Reshape (1-d to 3-d) | `reshape` | `input1d`, `dense`, `flatten` | 3 |||
44+
| Flatten | `flatten` | `input2d`, `input3d`, `conv1d`, `conv2d`, `maxpool1d`, `maxpool2d`, `reshape` | 1 |||
45+
| Reshape (1-d to 2-d or 3-d) | `reshape` | `dense`, `dropout`, `flatten`, `input1d` | 2, 3 |||
4746

4847
## Getting started
4948

@@ -263,11 +262,13 @@ It may be useful to read if you want to contribute a new feature to neural-fortr
263262

264263
Thanks to all open-source contributors to neural-fortran:
265264
[awvwgk](https://github.com/awvwgk),
265+
[certik](https://github.com/certik),
266266
[ggoyman](https://github.com/ggoyman),
267267
[ivan-pi](https://github.com/ivan-pi),
268268
[jacobwilliams](https://github.com/jacobwilliams),
269269
[jvdp1](https://github.com/jvdp1),
270270
[jvo203](https://github.com/jvo203),
271+
[mathomp4](https://github.com/mathomp4),
271272
[milancurcic](https://github.com/milancurcic),
272273
[OneAdder](https://github.com/OneAdder),
273274
[pirpyn](https://github.com/pirpyn),

example/cnn_mnist.f90

+1-1
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ program cnn_mnist
2020

2121
net = network([ &
2222
input(784), &
23-
reshape([1,28,28]), &
23+
reshape(1, 28, 28), &
2424
conv2d(filters=8, kernel_size=3, activation=relu()), &
2525
maxpool2d(pool_size=2), &
2626
conv2d(filters=16, kernel_size=3, activation=relu()), &

example/cnn_mnist_1d.f90

+2-2
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
program cnn_mnist_1d
22

33
use nf, only: network, sgd, &
4-
input, conv1d, maxpool1d, flatten, dense, reshape, reshape2d, locally_connected1d, &
4+
input, conv1d, maxpool1d, flatten, dense, reshape, locally_connected1d, &
55
load_mnist, label_digits, softmax, relu
66

77
implicit none
@@ -20,7 +20,7 @@ program cnn_mnist_1d
2020

2121
net = network([ &
2222
input(784), &
23-
reshape2d([28, 28]), &
23+
reshape(28, 28), &
2424
locally_connected1d(filters=8, kernel_size=3, activation=relu()), &
2525
maxpool1d(pool_size=2), &
2626
locally_connected1d(filters=16, kernel_size=3, activation=relu()), &

fpm.toml

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
name = "neural-fortran"
2-
version = "0.20.0"
2+
version = "0.21.0"
33
license = "MIT"
44
author = "Milan Curcic"
55
maintainer = "mcurcic@miami.edu"

src/nf.f90

-1
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,6 @@ module nf
1616
maxpool1d, &
1717
maxpool2d, &
1818
reshape, &
19-
reshape2d, &
2019
self_attention
2120
use nf_loss, only: mse, quadratic
2221
use nf_metrics, only: corr, maxabs

src/nf/nf_layer_constructors.f90

+22-23
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,6 @@ module nf_layer_constructors
2020
maxpool1d, &
2121
maxpool2d, &
2222
reshape, &
23-
reshape2d, &
2423
self_attention, &
2524
embedding, &
2625
layernorm
@@ -94,6 +93,28 @@ end function input3d
9493

9594
end interface input
9695

96+
97+
interface reshape
98+
99+
module function reshape2d(dim1, dim2) result(res)
100+
!! Rank-1 to rank-2 reshape layer constructor.
101+
integer, intent(in) :: dim1, dim2
102+
!! Shape of the output
103+
type(layer) :: res
104+
!! Resulting layer instance
105+
end function reshape2d
106+
107+
module function reshape3d(dim1, dim2, dim3) result(res)
108+
!! Rank-1 to rank-3 reshape layer constructor.
109+
integer, intent(in) :: dim1, dim2, dim3
110+
!! Shape of the output
111+
type(layer) :: res
112+
!! Resulting layer instance
113+
end function reshape3d
114+
115+
end interface reshape
116+
117+
97118
interface
98119

99120
module function dense(layer_size, activation) result(res)
@@ -283,28 +304,6 @@ module function maxpool2d(pool_size, stride) result(res)
283304
!! Resulting layer instance
284305
end function maxpool2d
285306

286-
module function reshape(output_shape) result(res)
287-
!! Rank-1 to rank-any reshape layer constructor.
288-
!! Currently implemented is only rank-3 for the output of the reshape.
289-
!!
290-
!! This layer is for connecting 1-d inputs to conv2d or similar layers.
291-
integer, intent(in) :: output_shape(:)
292-
!! Shape of the output
293-
type(layer) :: res
294-
!! Resulting layer instance
295-
end function reshape
296-
297-
module function reshape2d(output_shape) result(res)
298-
!! Rank-1 to rank-any reshape layer constructor.
299-
!! Currently implemented is only rank-2 for the output of the reshape.
300-
!!
301-
!! This layer is for connecting 1-d inputs to conv1d or similar layers.
302-
integer, intent(in) :: output_shape(:)
303-
!! Shape of the output
304-
type(layer) :: res
305-
!! Resulting layer instance
306-
end function reshape2d
307-
308307
module function linear2d(out_features) result(res)
309308
!! Rank-2 (sequence_length, out_features) linear layer constructor.
310309
!! sequence_length is determined at layer initialization, based on the

src/nf/nf_layer_constructors_submodule.f90

+13-26
Original file line numberDiff line numberDiff line change
@@ -12,8 +12,8 @@
1212
use nf_locally_connected1d_layer, only: locally_connected1d_layer
1313
use nf_maxpool1d_layer, only: maxpool1d_layer
1414
use nf_maxpool2d_layer, only: maxpool2d_layer
15-
use nf_reshape_layer, only: reshape3d_layer
1615
use nf_reshape2d_layer, only: reshape2d_layer
16+
use nf_reshape3d_layer, only: reshape3d_layer
1717
use nf_linear2d_layer, only: linear2d_layer
1818
use nf_self_attention_layer, only: self_attention_layer
1919
use nf_embedding_layer, only: embedding_layer
@@ -229,35 +229,22 @@ module function maxpool2d(pool_size, stride) result(res)
229229
end function maxpool2d
230230

231231

232-
module function reshape(output_shape) result(res)
233-
integer, intent(in) :: output_shape(:)
234-
type(layer) :: res
235-
236-
res % name = 'reshape'
237-
res % layer_shape = output_shape
238-
239-
if (size(output_shape) == 3) then
240-
allocate(res % p, source=reshape3d_layer(output_shape))
241-
else
242-
error stop 'size(output_shape) of the reshape layer must == 3'
243-
end if
244-
245-
end function reshape
246-
247-
module function reshape2d(output_shape) result(res)
248-
integer, intent(in) :: output_shape(:)
232+
module function reshape2d(dim1, dim2) result(res)
233+
integer, intent(in) :: dim1, dim2
249234
type(layer) :: res
250-
251235
res % name = 'reshape2d'
252-
res % layer_shape = output_shape
236+
res % layer_shape = [dim1, dim2]
237+
allocate(res % p, source=reshape2d_layer(res % layer_shape))
238+
end function reshape2d
253239

254-
if (size(output_shape) == 2) then
255-
allocate(res % p, source=reshape2d_layer(output_shape))
256-
else
257-
error stop 'size(output_shape) of the reshape layer must == 2'
258-
end if
259240

260-
end function reshape2d
241+
module function reshape3d(dim1, dim2, dim3) result(res)
242+
integer, intent(in) :: dim1, dim2, dim3
243+
type(layer) :: res
244+
res % name = 'reshape3d'
245+
res % layer_shape = [dim1, dim2, dim3]
246+
allocate(res % p, source=reshape3d_layer(res % layer_shape))
247+
end function reshape3d
261248

262249

263250
module function linear2d(out_features) result(res)

src/nf/nf_layer_submodule.f90

+1-1
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
use nf_maxpool1d_layer, only: maxpool1d_layer
1414
use nf_maxpool2d_layer, only: maxpool2d_layer
1515
use nf_reshape2d_layer, only: reshape2d_layer
16-
use nf_reshape_layer, only: reshape3d_layer
16+
use nf_reshape3d_layer, only: reshape3d_layer
1717
use nf_linear2d_layer, only: linear2d_layer
1818
use nf_self_attention_layer, only: self_attention_layer
1919
use nf_embedding_layer, only: embedding_layer

src/nf/nf_network_submodule.f90

+2-2
Original file line numberDiff line numberDiff line change
@@ -12,13 +12,13 @@
1212
use nf_maxpool1d_layer, only: maxpool1d_layer
1313
use nf_maxpool2d_layer, only: maxpool2d_layer
1414
use nf_reshape2d_layer, only: reshape2d_layer
15-
use nf_reshape_layer, only: reshape3d_layer
15+
use nf_reshape3d_layer, only: reshape3d_layer
1616
use nf_linear2d_layer, only: linear2d_layer
1717
use nf_self_attention_layer, only: self_attention_layer
1818
use nf_embedding_layer, only: embedding_layer
1919
use nf_layernorm_layer, only: layernorm_layer
2020
use nf_layer, only: layer
21-
use nf_layer_constructors, only: conv1d, conv2d, dense, flatten, input, maxpool1d, maxpool2d, reshape, reshape2d
21+
use nf_layer_constructors, only: conv1d, conv2d, dense, flatten, input, maxpool1d, maxpool2d, reshape
2222
use nf_loss, only: quadratic
2323
use nf_optimizers, only: optimizer_base_type, sgd
2424
use nf_parallel, only: tile_indices

src/nf/nf_reshape_layer.f90 renamed to src/nf/nf_reshape3d_layer.f90

+2-2
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
module nf_reshape_layer
1+
module nf_reshape3d_layer
22

33
!! This module provides the concrete reshape layer type.
44
!! It is used internally by the layer type.
@@ -73,4 +73,4 @@ end subroutine init
7373

7474
end interface
7575

76-
end module nf_reshape_layer
76+
end module nf_reshape3d_layer

src/nf/nf_reshape_layer_submodule.f90 renamed to src/nf/nf_reshape3d_layer_submodule.f90

+2-2
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
submodule(nf_reshape_layer) nf_reshape_layer_submodule
1+
submodule(nf_reshape3d_layer) nf_reshape3d_layer_submodule
22

33
use nf_base_layer, only: base_layer
44

@@ -48,4 +48,4 @@ module subroutine init(self, input_shape)
4848

4949
end subroutine init
5050

51-
end submodule nf_reshape_layer_submodule
51+
end submodule nf_reshape3d_layer_submodule

test/test_insert_flatten.f90

+1-1
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ program test_insert_flatten
4545

4646
net = network([ &
4747
input(4), &
48-
reshape([1, 2, 2]), &
48+
reshape(1, 2, 2), &
4949
dense(4) &
5050
])
5151

test/test_reshape2d_layer.f90

+2-4
Original file line numberDiff line numberDiff line change
@@ -1,23 +1,21 @@
11
program test_reshape2d_layer
22

33
use iso_fortran_env, only: stderr => error_unit
4-
use nf, only: input, network, reshape2d_layer => reshape2d
5-
use nf_datasets, only: download_and_unpack, keras_reshape_url
4+
use nf, only: input, network, reshape2d => reshape
65

76
implicit none
87

98
type(network) :: net
109
real, allocatable :: sample_input(:), output(:,:)
1110
integer, parameter :: output_shape(2) = [4,4]
1211
integer, parameter :: input_size = product(output_shape)
13-
character(*), parameter :: keras_reshape_path = 'keras_reshape.h5'
1412
logical :: file_exists
1513
logical :: ok = .true.
1614

1715
! Create the network
1816
net = network([ &
1917
input(input_size), &
20-
reshape2d_layer(output_shape) &
18+
reshape2d(output_shape(1), output_shape(2)) &
2119
])
2220

2321
if (.not. size(net % layers) == 2) then

test/test_reshape_layer.f90

+2-2
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
program test_reshape_layer
22

33
use iso_fortran_env, only: stderr => error_unit
4-
use nf, only: input, network, reshape_layer => reshape
4+
use nf, only: input, network, reshape3d => reshape
55
use nf_datasets, only: download_and_unpack, keras_reshape_url
66

77
implicit none
@@ -17,7 +17,7 @@ program test_reshape_layer
1717
! Create the network
1818
net = network([ &
1919
input(input_size), &
20-
reshape_layer(output_shape) &
20+
reshape3d(3, 32, 32) &
2121
])
2222

2323
if (.not. size(net % layers) == 2) then

0 commit comments

Comments (0)