
INTERNSHIP

Kernel: Python 3 (Anaconda 2022)
# Import necessary libraries
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.utils import to_categorical

# Load and preprocess the MNIST dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Normalize and reshape the input data
x_train = x_train / 255.0
x_test = x_test / 255.0
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)

# One-hot encode the target labels
y_train = to_categorical(y_train, num_classes=10)
y_test = to_categorical(y_test, num_classes=10)

# Build the neural network model
model = Sequential()
model.add(Flatten(input_shape=(28, 28, 1)))
model.add(Dense(128, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))

# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Train the model
model.fit(x_train, y_train, epochs=10, batch_size=32, validation_split=0.2)

# Evaluate the model on the test data
test_loss, test_acc = model.evaluate(x_test, y_test)
print(f'Test accuracy: {test_acc * 100:.2f}%')

# Save the model
model.save('mnist_digit_recognition_model.h5')
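As a quick sanity check, the saved model can be loaded back and used to classify a single test image. This is a minimal usage sketch, assuming the cell above ran to completion and wrote mnist_digit_recognition_model.h5 into the working directory.

# Minimal sketch: reload the saved model and classify one MNIST test digit.
# Assumes 'mnist_digit_recognition_model.h5' was produced by the cell above.
import numpy as np
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import load_model

loaded_model = load_model('mnist_digit_recognition_model.h5')

# Prepare a single test image the same way the training data was prepared
(_, _), (x_test, y_test) = mnist.load_data()
sample = x_test[0].astype('float32') / 255.0
sample = sample.reshape(1, 28, 28, 1)

# Predict the digit class and compare with the true label
probs = loaded_model.predict(sample)
print(f"Predicted digit: {np.argmax(probs)}, true digit: {y_test[0]}")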
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import LSTM, Dense
from sklearn.metrics import mean_squared_error

# Load historical stock price data (you need to replace 'AAPL' and provide your own dataset)
data = pd.read_csv("AAPL.csv")
data = data[['Date', 'Close']]
data['Date'] = pd.to_datetime(data['Date'])
data.set_index('Date', inplace=True)

# Normalize the closing prices to the [0, 1] range
scaler = MinMaxScaler()
data['Close'] = scaler.fit_transform(data[['Close']])

# Split the data into training and testing sets
train_size = int(len(data) * 0.8)
train_data, test_data = data[0:train_size], data[train_size:]

# Convert the data to sequences for the LSTM model
def create_sequences(data, sequence_length):
    sequences, labels = [], []
    for i in range(len(data) - sequence_length):
        sequence = data[i:i+sequence_length]
        label = data.iloc[i + sequence_length]
        sequences.append(sequence)
        labels.append(label)
    return np.array(sequences), np.array(labels)

sequence_length = 10
X_train, y_train = create_sequences(train_data, sequence_length)
X_test, y_test = create_sequences(test_data, sequence_length)

# Build the LSTM model
model = Sequential()
model.add(LSTM(units=50, input_shape=(X_train.shape[1], 1)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mean_squared_error')

# Train the model
model.fit(X_train, y_train, epochs=100, batch_size=64)

# Make predictions
train_predictions = model.predict(X_train)
test_predictions = model.predict(X_test)

# Inverse transform the predictions and the targets to get actual stock prices
train_predictions = scaler.inverse_transform(train_predictions)
test_predictions = scaler.inverse_transform(test_predictions)
y_train_actual = scaler.inverse_transform(y_train)
y_test_actual = scaler.inverse_transform(y_test)

# Calculate the Mean Squared Error (MSE) for training and testing predictions
train_mse = mean_squared_error(y_train_actual, train_predictions)
test_mse = mean_squared_error(y_test_actual, test_predictions)
print(f"Train MSE: {train_mse}")
print(f"Test MSE: {test_mse}")

# Plot the results
plt.figure(figsize=(16, 8))
plt.title('Stock Price Prediction')
plt.xlabel('Date')
plt.ylabel('Stock Price')
plt.plot(data.index[sequence_length:train_size], train_predictions, label='Training Predictions')
plt.plot(data.index[train_size+sequence_length:], test_predictions, label='Testing Predictions')
plt.legend()
plt.show()
---------------------------------------------------------------------------
FileNotFoundError                         Traceback (most recent call last)
Cell In[2], line 10
      7 from sklearn.metrics import mean_squared_error
      9 # Load historical stock price data (you need to replace 'AAPL' and provide your own dataset)
---> 10 data = pd.read_csv("AAPL.csv")
     11 data = data[['Date', 'Close']]
     12 data['Date'] = pd.to_datetime(data['Date'])

FileNotFoundError: [Errno 2] No such file or directory: 'AAPL.csv'
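The traceback above only means that no AAPL.csv sits next to the notebook; the cell expects any CSV with Date and Close columns. As one possible way to produce such a file, the sketch below uses the third-party yfinance package, which is an assumption on my part: it is not imported anywhere in this notebook and would need to be installed separately.

# Hypothetical helper: build the AAPL.csv file that the LSTM cell above expects.
# Assumes the third-party 'yfinance' package is available (pip install yfinance);
# any other data source providing 'Date' and 'Close' columns works just as well.
import yfinance as yf

history = yf.Ticker("AAPL").history(start="2015-01-01", end="2023-01-01")

# Keep only the closing price and write it out with an explicit Date column
history = history.reset_index()[["Date", "Close"]]
history.to_csv("AAPL.csv", index=False)
print(history.head())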