from sklearn.model_selection import train_test_split
X = df.drop("target", axis=1)
y = df["target"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
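# If "target" holds class labels, it is often worth preserving the class
# proportions in both splits. A minimal sketch assuming a classification
# problem (the stratify argument is the only addition to the call above):
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.33, random_state=42, stratify=y
)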
#Let us now split the dataset into train & test
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
print("x_train ", x_train.shape)
print("x_test  ", x_test.shape)
print("y_train ", y_train.shape)
print("y_test  ", y_test.shape)
##sklearn train test split
from sklearn.model_selection import train_test_split
X = df.drop(['target'], axis=1).values # independent features
y = df['target'].values # dependent variable
# Choose your test size to split between training and testing sets:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
#OR randomly split the whole DataFrame to your desired percentage, instead of splitting features and target separately:
training_data = df.sample(frac=0.8, random_state=25) # 80% of rows as the training sample; random_state=25 for reproducibility
testing_data = df.drop(training_data.index) # the remaining 20% of rows become the testing sample
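# Optional sanity check: the two samples should partition df exactly, with no
# overlapping rows and no rows lost (assumes df has a unique index).
assert len(training_data) + len(testing_data) == len(df)
assert training_data.index.intersection(testing_data.index).empty
print("train:", training_data.shape, " test:", testing_data.shape)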
import pandas as pd
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
cal_housing = fetch_california_housing()
X = pd.DataFrame(cal_housing.data, columns=cal_housing.feature_names)
y = cal_housing.target
y -= y.mean()  # center the target so it has zero mean
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=0)
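# One quick way to confirm the split is usable: fit an estimator on the 90%
# training portion and score it on the held-out 10%. LinearRegression here is
# only an illustrative choice, not part of the original example.
from sklearn.linear_model import LinearRegression

model = LinearRegression()
model.fit(X_train, y_train)
print("test R^2:", model.score(X_test, y_test))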