Neural network: estimation of the frequency of a sine wave

In order to study Keras LSTM and RNN layers, I set myself a simple task: given a sampled sine wave, can the network predict its frequency?

I would not expect a simple feed-forward network to predict the frequency, since the concept of time matters here. However, even with an LSTM I cannot recover the frequency: the network only ever outputs the trivial value of zero as its frequency estimate (even on the training samples).

Here is the code for creating the train set.

import numpy as np
import matplotlib.pyplot as plt

def create_sine(frequency):
    """Sample sin(frequency * t) at 2000 evenly spaced points, t in [0, 2*pi]."""
    t = np.linspace(0, 2 * np.pi, 2000)
    return np.sin(frequency * t)

# One training wave per integer frequency 1..299; the labels are the frequencies.
train_y = list(range(1, 300))
train_x = np.array([create_sine(freq) for freq in train_y])

Now, here is a simple neural network for this example.

from keras.models import Model
from keras.layers import Dense, Input, LSTM

# Baseline: a plain feed-forward regressor fed the raw 2000-sample wave.
input_series = Input(shape=(2000,), name='Input')
hidden = Dense(100)(input_series)
pred = Dense(1, activation='relu')(hidden)
model = Model(input_series, pred)
model.compile('adam', 'mean_absolute_error')
model.fit(train_x[:100], train_y[:100], epochs=100)

As expected, this NN will not learn anything useful. Then I tried a simple LSTM example.

# Same regression target, but the wave is fed as a length-2000 sequence to an LSTM.
input_series = Input(shape=(2000, 1), name='Input')
encoded = LSTM(100)(input_series)
pred = Dense(1, activation='relu')(encoded)
model = Model(input_series, pred)
model.compile('adam', 'mean_absolute_error')
model.fit(train_x[:100].reshape(100, 2000, 1), train_y[:100], epochs=100)

However, this LSTM-based model also does not recognize anything useful.

+6
3

?

, RNN, :

  • , . (300 * 2000 ), () . - , , .

  • . , RNN . , NLP , RNN , .. .

    : frequency=1 frequency=100. , . , RNN , , , RNN, , .

  • . 1 300 x, y , .

, , 10, ( : , , , 50 ).

RNN 10 . , (Dropout).

:

import numpy as np
from keras.models import Model
from keras.layers import Input, Dense, Dropout, LSTM

max_freq = 10
time_steps = 100

def create_sine(frequency, offset):
    """Sample sin(frequency * t) over one window of length 2*pi, phase-shifted by offset."""
    t = np.linspace(offset, 2 * np.pi + offset, time_steps)
    return np.sin(frequency * t)

# Ten copies of each integer frequency 1..max_freq-1, each with a random phase,
# so the network cannot memorize a single fixed waveform per label.
train_y = list(range(1, max_freq)) * 10
train_x = np.array([create_sine(freq, np.random.uniform(0, 1)) for freq in train_y])
train_y = np.array(train_y)

# Sequence model: LSTM encoder followed by a small dense head with dropout.
input_series = Input(shape=(time_steps, 1), name='Input')
encoded = LSTM(units=100)(input_series)
hidden = Dense(units=100, activation='relu')(encoded)
regularized = Dropout(rate=0.1)(hidden)
output = Dense(units=1, activation='relu')(regularized)

model = Model(input_series, output)
model.compile('adam', 'mean_squared_error')
model.fit(train_x.reshape(-1, time_steps, 1), train_y, epochs=200)

# Trying the network on the same data
test_x = train_x.reshape(-1, time_steps, 1)
test_y = train_y
predicted = model.predict(test_x).reshape([-1])
print()
print((predicted - train_y)[:12])
print(np.mean(np.abs(predicted - train_y)))

:

max_freq = 10

[-0.05612183 -0.01982236 -0.03744316 -0.02568841 -0.11959982 -0.0770483
  0.04643679  0.12057972 -0.00625324 -0.00724655 -0.16919005 -0.04512954]
0.0503574344847

max_freq = 20 ( - )

[ 0.51365542  0.09269333 -0.009691    0.0619092   0.09852839  0.04378462
  0.01430321 -0.01953268  0.00722599  0.02558327 -0.04520988 -0.0614748 ]
0.146024380232

max_freq = 30 ( - )

[-0.28205156 -0.28922796 -0.00569081 -0.21314907  0.1068716   0.23497915
  0.23975039  0.25955486  0.26333141  0.24235058  0.08320332 -0.03686047]
0.406703719805

, , max_freq . , , , , .

+3

, ,

,

-1 1

+1

, . LSTM. , . , keras, tflearn.

import numpy as np
import tflearn
from random import shuffle

# parameters
n_input = 100
n_train = 2000
n_test = 500

# generate data: one sine wave per frequency, frequencies visited in random order
frequencies = np.linspace(1, 50, n_train + n_test)
shuffle(frequencies)

t = np.linspace(0, 2 * np.pi, n_input)
xs = [np.sin(t * freq) for freq in frequencies]
ys = list(frequencies)

# Split into train/test and shape for a (samples, time_steps, channels) LSTM input.
xs_train = np.array(xs[:n_train]).reshape(n_train, n_input, 1)
xs_test = np.array(xs[n_train:]).reshape(n_test, n_input, 1)
ys_train = np.array(ys[:n_train]).reshape(-1, 1)
ys_test = np.array(ys[n_train:]).reshape(-1, 1)

# LSTM network prediction: 100-step sequence in, single frequency estimate out.
inputs = tflearn.input_data(shape=[None, n_input, 1])
recurrent = tflearn.lstm(inputs, 10)
dense = tflearn.fully_connected(recurrent, 100, activation="relu")
estimate = tflearn.fully_connected(dense, 1)
net = tflearn.regression(estimate, optimizer='adam', loss='mean_square')

model = tflearn.DNN(net)
model.fit(xs_train, ys_train, n_epoch=100)

# Show predictions next to the ground-truth frequencies on held-out data.
print(np.hstack((model.predict(xs_test), ys_test))[:10])
# [[ 13.08494568  12.76470588]
#  [ 22.23135376  21.98039216]
#  [ 39.0812912   37.58823529]
#  [ 15.77548409  15.66666667]
#  [ 26.57996941  25.58823529]
#  [ 26.57759476  25.11764706]
#  [ 16.42217445  15.8627451 ]
#  [ 32.55020905  30.80392157]
#  [ 44.16622925  43.01960784]
#  [ 26.18071365  25.45098039]]

, LSTM, LSTM Deep Neural Network:

# Deep network instead of LSTM
layer = tflearn.input_data(shape=[None, n_input])
layer = tflearn.fully_connected(layer, 100)
layer = tflearn.fully_connected(layer, 100)
layer = tflearn.fully_connected(layer, 1)
net = tflearn.regression(layer, optimizer='adam', loss='mean_square')

model = tflearn.DNN(net)
model.fit(xs_train, ys_train)
print(np.hstack((model.predict(xs_test), ys_test))[:10])

The full code is available as a gist.

+1

All Articles