Spaceship Titanic Code (XGBoost Classifier)

Libraries

import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
import xgboost as xgb


Load Datasets

train_df = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
# Display the first few rows of the training data
print("Training Data Head:")
print(train_df.head())
print("\nTraining Data Info:")
print(train_df.info())


Data Exploration

print("\nSummary Statistics:")
print(train_df.describe())


print("\nMissing Values:")
print(train_df.isnull().sum())
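
It can also help to view the missing counts as a share of each column and to summarize the non-numeric columns separately. A small sketch against the same train_df (missing_pct is a name introduced here):

# Missing values as a percentage of rows, largest first
missing_pct = train_df.isnull().mean().mul(100).sort_values(ascending=False)
print("\nMissing Values (%):")
print(missing_pct[missing_pct > 0].round(2))
# Summary of the object (categorical) columns
print("\nCategorical Summary:")
print(train_df.describe(include='object'))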


Visualize the Data

# 1. Distribution of the Target Variable (Transported)
plt.figure(figsize=(8, 6))
sns.countplot(x='Transported', data=train_df)
plt.title('Distribution of Target Variable (Transported)')
plt.show()
# 2. Age Distribution
plt.figure(figsize=(8, 6))
sns.histplot(train_df['Age'].dropna(), bins=30, kde=True)
plt.title('Age Distribution')
plt.show()
# 3. Cabin Deck Distribution (Cabin has the form Deck/Num/Side, so plot the deck
#    letter rather than the thousands of unique raw Cabin values)
plt.figure(figsize=(8, 6))
sns.countplot(x=train_df['Cabin'].dropna().str.split('/').str[0])
plt.title('Cabin Deck Distribution')
plt.show()
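
A plot that relates a feature to the target can also be informative; for example, a quick sketch comparing CryoSleep with Transported (both columns as loaded above):

# 4. CryoSleep vs. Transported
plt.figure(figsize=(8, 6))
sns.countplot(x='CryoSleep', hue='Transported', data=train_df)
plt.title('CryoSleep vs. Transported')
plt.show()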


Data Cleaning and Feature Engineering

def fill_missing_values(df):
    # Fill the categorical columns with sensible defaults and Age with its median
    df['HomePlanet'] = df['HomePlanet'].fillna('Earth')
    df['CryoSleep'] = df['CryoSleep'].fillna(False).infer_objects(copy=False)
    df['Cabin'] = df['Cabin'].fillna('Unknown')
    df['Destination'] = df['Destination'].fillna('TRAPPIST-1e')
    df['Age'] = df['Age'].fillna(df['Age'].median())
    df['VIP'] = df['VIP'].fillna(False).infer_objects(copy=False)
    # Fill whatever is left (mainly the spending columns) with 0
    df = df.fillna(0)
    return df

# Apply missing value filling
train_df = fill_missing_values(train_df)
test_df = fill_missing_values(test_df)

# Convert columns to string type to ensure uniformity
label_cols = ['HomePlanet', 'CryoSleep', 'Cabin', 'Destination', 'VIP']
for col in label_cols:
    train_df[col] = train_df[col].astype(str)
    test_df[col] = test_df[col].astype(str)

# Combine data from both datasets for fitting the encoder
combined_data = pd.concat([train_df[label_cols], test_df[label_cols]], axis=0)

# Label encoding using combined data
label_encoders = {col: LabelEncoder().fit(combined_data[col]) for col in label_cols}

# Apply label encoding to the train and test sets
for col, le in label_encoders.items():
    train_df[col] = le.transform(train_df[col])
    test_df[col] = le.transform(test_df[col])
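
# Alternative hinted at by the commented-out OneHotEncoder import: one-hot encode
# the low-cardinality categoricals instead of label encoding them. Cabin is left
# out because its thousands of unique values would explode the feature count.
# Illustrative sketch only (onehot_cols and the *_onehot names are introduced here);
# the rest of this notebook keeps the label-encoded columns.
onehot_cols = ['HomePlanet', 'CryoSleep', 'Destination', 'VIP']
combined_onehot = pd.get_dummies(
    pd.concat([train_df[onehot_cols], test_df[onehot_cols]], axis=0),
    columns=onehot_cols,
)
train_onehot = combined_onehot.iloc[:len(train_df)]
test_onehot = combined_onehot.iloc[len(train_df):]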

# Prepare features and target
X = train_df.drop(['PassengerId', 'Name', 'Transported'], axis=1)
y = train_df['Transported'].astype(int)
X_test = test_df.drop(['PassengerId', 'Name'], axis=1)
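
A common extension of the feature engineering above (not used in this pipeline) is to split Cabin, whose raw form is Deck/Num/Side (e.g. 'B/0/P'), into three separate features. A minimal sketch assuming the raw string Cabin column, so it would have to run before the label-encoding step; split_cabin is a helper name introduced here:

def split_cabin(df):
    # Cabin looks like 'Deck/Num/Side'; rows with a missing or 'Unknown' Cabin
    # fall back to 'Unknown' / -1 defaults
    parts = df['Cabin'].str.split('/', expand=True)
    df['Deck'] = parts[0].fillna('Unknown')
    df['CabinNum'] = pd.to_numeric(parts[1], errors='coerce').fillna(-1)
    df['Side'] = parts[2].fillna('Unknown')
    return df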


Model Training

# Split the data into train and validation sets
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
# Train the model
model = xgb.XGBClassifier(n_estimators=200, learning_rate=0.1, max_depth=5)
model.fit(X_train, y_train)
# Predict on the validation set
y_pred = model.predict(X_val)
# Calculate accuracy and a per-class report
print('Validation Accuracy:', accuracy_score(y_val, y_pred))
print('Classification Report:')
print(classification_report(y_val, y_pred))
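
It can also be worth checking which features the model leans on; xgboost ships a plot_importance helper for this:

# Plot how often each feature is used for splits in the trees
xgb.plot_importance(model, max_num_features=10)
plt.title('Feature Importance')
plt.show()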


Submission

# Predict on the test set
test_pred = model.predict(X_test)
# Prepare submission file
submission = pd.DataFrame({
    'PassengerId': test_df['PassengerId'],
    'Transported': test_pred
})
# Convert the 0/1 predictions back to booleans (True/False) for the submission format
submission['Transported'] = submission['Transported'].map({1: True, 0: False})
# Save the submission file
submission.to_csv('submission.csv', index=False)
print('Submission file saved as submission.csv')
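
As a final sanity check, the saved file can be read back to confirm it has the expected two columns (PassengerId, Transported) and one row per test passenger:

# Read the file back and inspect its shape and first rows
check = pd.read_csv('submission.csv')
print(check.shape)
print(check.head())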