# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here are several helpful packages to load

import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory

import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" 
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
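# A minimal sketch of that distinction (the file name below is an arbitrary placeholder):
# anything written under /kaggle/working/ is preserved as notebook output, so a quick check
# is to save a small DataFrame there and confirm it appears in the output file list.
pd.DataFrame({'example': [1, 2, 3]}).to_csv('/kaggle/working/example.csv', index=False)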
# Set your own project id here
PROJECT_ID = 'kaggle_notebooks'
from google.cloud import storage
storage_client = storage.Client(project=PROJECT_ID)
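# The client above is only constructed, not used in this section. As a hedged sketch of how it
# could be used (the bucket and blob names are hypothetical placeholders, and this assumes the
# notebook has access to such a bucket), a file could be pulled into the working directory:
# bucket = storage_client.bucket('my-example-bucket')
# blob = bucket.blob('data/example.csv')
# blob.download_to_filename('/kaggle/working/example.csv')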
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn import datasets
# Loading the breast cancer dataset and splitting it into features (X) and labels (y)
dataset = datasets.load_breast_cancer()
X, y = dataset.data, dataset.target
n_samples, n_features = X.shape
print(n_samples, n_features)
569 30
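# A quick sanity check before splitting (a small addition to the original flow): np.bincount
# on the 0/1 labels shows how the two classes are balanced across the 569 samples.
print(np.bincount(y))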
# Splitting the Data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=12)
print(X_train.shape[0], X_test.shape[0])
455 114
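# StandardScaler and torch are imported above but not used yet in this section. A likely next
# step (a sketch under that assumption, not necessarily the author's exact code) is to fit the
# scaler on the training split only, then move the arrays into float tensors for PyTorch.
sc = StandardScaler()
X_train = sc.fit_transform(X_train)   # fit the scaler on training data only to avoid leakage
X_test = sc.transform(X_test)         # reuse the same scaling for the test split
X_train = torch.from_numpy(X_train.astype(np.float32))
X_test = torch.from_numpy(X_test.astype(np.float32))
y_train = torch.from_numpy(y_train.astype(np.float32)).view(-1, 1)
y_test = torch.from_numpy(y_test.astype(np.float32)).view(-1, 1)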