import yfinance as yf
# Define the ticker symbol
tickerSymbol = 'AAPL'
# Get data for this ticker
tickerData = yf.Ticker(tickerSymbol)
# Get the historical prices for this ticker
tickerDf = tickerData.history(period='1d', start='2022-01-01', end='2023-01-25')
# See your data
print(tickerDf)
                                 Open        High         Low       Close  \
Date
2022-01-03 00:00:00-05:00  176.290490  181.296774  176.171534  180.434296
2022-01-04 00:00:00-05:00  181.048929  181.356243  177.569307  178.144287
2022-01-05 00:00:00-05:00  178.055086  178.610235  173.128111  173.405685
2022-01-06 00:00:00-05:00  171.204893  173.782390  170.154072  170.510956
2022-01-07 00:00:00-05:00  171.393242  172.632420  169.549344  170.679474
...                               ...         ...         ...         ...
2023-01-18 00:00:00-05:00  136.422724  138.207519  134.637913  134.817398
2023-01-19 00:00:00-05:00  133.690681  135.854378  133.381583  134.877228
2023-01-20 00:00:00-05:00  134.887190  137.619239  133.830270  137.469666
2023-01-23 00:00:00-05:00  137.718933  142.903846  137.499571  140.700256
2023-01-24 00:00:00-05:00  139.902580  142.744310  139.892614  142.116135

                              Volume  Dividends  Stock Splits
Date
2022-01-03 00:00:00-05:00  104487900        0.0           0.0
2022-01-04 00:00:00-05:00   99310400        0.0           0.0
2022-01-05 00:00:00-05:00   94537600        0.0           0.0
2022-01-06 00:00:00-05:00   96904000        0.0           0.0
2022-01-07 00:00:00-05:00   86709100        0.0           0.0
...                              ...        ...           ...
2023-01-18 00:00:00-05:00   69672800        0.0           0.0
2023-01-19 00:00:00-05:00   58280400        0.0           0.0
2023-01-20 00:00:00-05:00   80223600        0.0           0.0
2023-01-23 00:00:00-05:00   81760300        0.0           0.0
2023-01-24 00:00:00-05:00   66435100        0.0           0.0

[266 rows x 7 columns]
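# Optional sanity check before modeling - a minimal sketch, assuming
# matplotlib is installed (it is not used elsewhere in this notebook).
# Plotting the series we are about to model catches obvious data problems.
import matplotlib.pyplot as plt
tickerDf['Close'].plot(title='AAPL daily close (split/dividend adjusted)')
plt.ylabel('Price (USD)')
plt.show()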
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense
# Use only closing price data for simplicity
data = tickerDf['Close'].values
data = data.reshape(-1, 1)
# Scale the data
scaler = MinMaxScaler(feature_range=(0,1))
data = scaler.fit_transform(data)
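# Note: keep a reference to this fitted scaler - the same object's
# inverse_transform() is needed later to map scaled predictions back to
# dollar prices.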
# Turn the series into supervised-learning samples: each row of X is a
# window of `look_back` consecutive values, and Y is the value that follows
def create_dataset(data, look_back=1):
    X, Y = [], []
    for i in range(len(data) - look_back - 1):
        X.append(data[i:(i + look_back), 0])
        Y.append(data[i + look_back, 0])
    return np.array(X), np.array(Y)
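# Worked example (illustrative only, not part of the pipeline): with
# look_back=3 on a toy series, each row of X is a sliding window and Y is
# the value that immediately follows it.
#   toy = np.arange(8, dtype=float).reshape(-1, 1)
#   Xt, Yt = create_dataset(toy, look_back=3)
#   Xt[0] -> [0., 1., 2.]   Yt[0] -> 3.0
#   Xt[1] -> [1., 2., 3.]   Yt[1] -> 4.0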
# Prepare the data
look_back = 10
X, Y = create_dataset(data, look_back)
# Reshape input to [samples, time steps, features]: here each 10-day window
# is fed as look_back features at a single time step
X = np.reshape(X, (X.shape[0], 1, X.shape[1]))
# Define the LSTM model
model = Sequential()
model.add(LSTM(4, input_shape=(1, look_back)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
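# Optional: model.summary() prints the layer output shapes and parameter
# counts for this small network (one 4-unit LSTM plus a 1-unit Dense head).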
# Train the model; note that batch_size=1 makes each epoch slow
model.fit(X, Y, epochs=100, batch_size=1, verbose=2)
Epoch 1/100
255/255 - 5s - loss: 0.1739 - 5s/epoch - 21ms/step
Epoch 2/100
255/255 - 1s - loss: 0.0155 - 915ms/epoch - 4ms/step
Epoch 3/100
255/255 - 1s - loss: 0.0103 - 937ms/epoch - 4ms/step
Epoch 4/100
255/255 - 1s - loss: 0.0076 - 1s/epoch - 5ms/step
Epoch 5/100
255/255 - 1s - loss: 0.0066 - 1s/epoch - 5ms/step
Epoch 6/100
255/255 - 1s - loss: 0.0060 - 1s/epoch - 5ms/step
Epoch 7/100
255/255 - 1s - loss: 0.0059 - 1s/epoch - 4ms/step
Epoch 8/100
255/255 - 1s - loss: 0.0057 - 1s/epoch - 5ms/step
Epoch 9/100
255/255 - 1s - loss: 0.0055 - 1s/epoch - 4ms/step
Epoch 10/100
255/255 - 1s - loss: 0.0053 - 1s/epoch - 5ms/step
Epoch 11/100
255/255 - 1s - loss: 0.0052 - 984ms/epoch - 4ms/step
Epoch 12/100
255/255 - 1s - loss: 0.0052 - 1s/epoch - 5ms/step
Epoch 13/100
255/255 - 1s - loss: 0.0050 - 1s/epoch - 5ms/step
Epoch 14/100
255/255 - 2s - loss: 0.0049 - 2s/epoch - 6ms/step
Epoch 15/100
255/255 - 1s - loss: 0.0051 - 1s/epoch - 4ms/step
Epoch 16/100
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
/home/kaylee/vscode/myproject/_notebooks/2023-05-25-stock-predictor.ipynb Cell 2 in <cell line: 37>()
     34 model.compile(loss='mean_squared_error', optimizer='adam')
     36 # Train the model
---> 37 model.fit(X, Y, epochs=100, batch_size=1, verbose=2)

File ~/anaconda3/lib/python3.9/site-packages/keras/utils/traceback_utils.py:65, in filter_traceback.<locals>.error_handler(*args, **kwargs)
     63 filtered_tb = None
     64 try:
---> 65     return fn(*args, **kwargs)
     66 except Exception as e:
     67     filtered_tb = _process_traceback_frames(e.__traceback__)

File ~/anaconda3/lib/python3.9/site-packages/keras/engine/training.py:1685, in Model.fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
   1677 with tf.profiler.experimental.Trace(
   1678     "train",
   1679     epoch_num=epoch,
   (...)
   1682     _r=1,
   1683 ):
   1684     callbacks.on_train_batch_begin(step)
-> 1685     tmp_logs = self.train_function(iterator)
   1686 if data_handler.should_sync:
   1687     context.async_wait()

File ~/anaconda3/lib/python3.9/site-packages/tensorflow/python/util/traceback_utils.py:150, in filter_traceback.<locals>.error_handler(*args, **kwargs)
    148 filtered_tb = None
    149 try:
--> 150     return fn(*args, **kwargs)
    151 except Exception as e:
    152     filtered_tb = _process_traceback_frames(e.__traceback__)

File ~/anaconda3/lib/python3.9/site-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py:894, in Function.__call__(self, *args, **kwds)
    891 compiler = "xla" if self._jit_compile else "nonXla"
    893 with OptionalXlaContext(self._jit_compile):
--> 894     result = self._call(*args, **kwds)
    896 new_tracing_count = self.experimental_get_tracing_count()
    897 without_tracing = (tracing_count == new_tracing_count)

File ~/anaconda3/lib/python3.9/site-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py:926, in Function._call(self, *args, **kwds)
    923     self._lock.release()
    924     # In this case we have created variables on the first call, so we run the
    925     # defunned version which is guaranteed to never create variables.
--> 926     return self._no_variable_creation_fn(*args, **kwds)  # pylint: disable=not-callable
    927 elif self._variable_creation_fn is not None:
    928     # Release the lock early so that multiple threads can perform the call
    929     # in parallel.
    930     self._lock.release()

File ~/anaconda3/lib/python3.9/site-packages/tensorflow/python/eager/polymorphic_function/tracing_compiler.py:143, in TracingCompiler.__call__(self, *args, **kwargs)
    140 with self._lock:
    141     (concrete_function,
    142      filtered_flat_args) = self._maybe_define_function(args, kwargs)
--> 143 return concrete_function._call_flat(
    144     filtered_flat_args, captured_inputs=concrete_function.captured_inputs)

File ~/anaconda3/lib/python3.9/site-packages/tensorflow/python/eager/polymorphic_function/monomorphic_function.py:1757, in ConcreteFunction._call_flat(self, args, captured_inputs, cancellation_manager)
   1753 possible_gradient_type = gradients_util.PossibleTapeGradientTypes(args)
   1754 if (possible_gradient_type == gradients_util.POSSIBLE_GRADIENT_TYPES_NONE
   1755     and executing_eagerly):
   1756     # No tape is watching; skip to running the function.
-> 1757     return self._build_call_outputs(self._inference_function.call(
   1758         ctx, args, cancellation_manager=cancellation_manager))
   1759 forward_backward = self._select_forward_and_backward_functions(
   1760     args,
   1761     possible_gradient_type,
   1762     executing_eagerly)
   1763 forward_function, args_with_tangents = forward_backward.forward()

File ~/anaconda3/lib/python3.9/site-packages/tensorflow/python/eager/polymorphic_function/monomorphic_function.py:381, in _EagerDefinedFunction.call(self, ctx, args, cancellation_manager)
    379 with _InterpolateFunctionError(self):
    380     if cancellation_manager is None:
--> 381         outputs = execute.execute(
    382             str(self.signature.name),
    383             num_outputs=self._num_outputs,
    384             inputs=args,
    385             attrs=attrs,
    386             ctx=ctx)
    387     else:
    388         outputs = execute.execute_with_cancellation(
    389             str(self.signature.name),
    390             num_outputs=self._num_outputs,
    (...)
    393             ctx=ctx,
    394             cancellation_manager=cancellation_manager)

File ~/anaconda3/lib/python3.9/site-packages/tensorflow/python/eager/execute.py:52, in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
     50 try:
     51     ctx.ensure_initialized()
---> 52     tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
     53         inputs, attrs, num_outputs)
     54 except core._NotOkStatusException as e:
     55     if name is not None:

KeyboardInterrupt:
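# Even from this interrupted run, the partially trained model can produce
# predictions. A minimal sketch - the names train_preds, actual, and rmse
# below are illustrative, not from the original notebook; note this measures
# in-sample (training) error, not out-of-sample accuracy.
train_preds = model.predict(X)                        # scaled, shape (samples, 1)
train_preds = scaler.inverse_transform(train_preds)   # back to dollar prices
actual = scaler.inverse_transform(Y.reshape(-1, 1))
rmse = np.sqrt(np.mean((train_preds - actual) ** 2))
print(f'Train RMSE: {rmse:.2f} USD')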