Árbol de decisión

In [244]:
import spacy as scy
    from collections import Counter
    from wordcloud import WordCloud
    import matplotlib.pyplot as plt
    import numpy as np
    import pandas as pd
    import seaborn as sns
    from sklearn.metrics import confusion_matrix, accuracy_score
    from sklearn.model_selection import train_test_split, cross_val_score
    from sklearn.preprocessing import LabelEncoder
    from sklearn.tree import DecisionTreeClassifier, plot_tree
    

Selección y preparación de los datos

In [229]:
# Load the lung cancer survey dataset.
# NOTE(review): hardcoded absolute Windows path — this breaks on any other
# machine; prefer a relative path under a configurable DATA_DIR.
path = r'C:\Users\57317\Desktop\Portafoleo_Data_Science\Experimentos-y-Data-Science\Personal Notebooks\Data\survey lung cancer.csv'
    data = pd.read_csv(path)
    
In [230]:
# Column overview (see output below): 309 rows, 16 columns, no nulls;
# GENDER and LUNG_CANCER are object (string) dtype, the remaining 14 are int64.
data.info()
    
<class 'pandas.core.frame.DataFrame'>
    RangeIndex: 309 entries, 0 to 308
    Data columns (total 16 columns):
     #   Column                 Non-Null Count  Dtype 
    ---  ------                 --------------  ----- 
     0   GENDER                 309 non-null    object
     1   AGE                    309 non-null    int64 
     2   SMOKING                309 non-null    int64 
     3   YELLOW_FINGERS         309 non-null    int64 
     4   ANXIETY                309 non-null    int64 
     5   PEER_PRESSURE          309 non-null    int64 
     6   CHRONIC DISEASE        309 non-null    int64 
     7   FATIGUE                309 non-null    int64 
     8   ALLERGY                309 non-null    int64 
     9   WHEEZING               309 non-null    int64 
     10  ALCOHOL CONSUMING      309 non-null    int64 
     11  COUGHING               309 non-null    int64 
     12  SHORTNESS OF BREATH    309 non-null    int64 
     13  SWALLOWING DIFFICULTY  309 non-null    int64 
     14  CHEST PAIN             309 non-null    int64 
     15  LUNG_CANCER            309 non-null    object
    dtypes: int64(14), object(2)
    memory usage: 38.8+ KB
    
In [231]:
# (rows, columns) before de-duplication: (309, 16).
data.shape
    
Out[231]:
(309, 16)
In [232]:
#Removing Duplicates
    # Rebinds `data` to the de-duplicated frame, so every later cell sees
    # fewer than the original 309 rows.
    data=data.drop_duplicates()
    
In [237]:
# a) Dividir el conjunto de datos en entrenamiento (200 observaciones) y prueba (resto)
    train_data, test_data = train_test_split(data, train_size=200, random_state=42)
    
    # Variables predictoras y objetivo para los conjuntos de entrenamiento y prueba
    X_train = train_data.drop(columns=['LUNG_CANCER'])
    y_train = train_data['LUNG_CANCER']
    X_test = test_data.drop(columns=['LUNG_CANCER'])
    y_test = test_data['LUNG_CANCER']
    
In [238]:
from sklearn.preprocessing import LabelEncoder
    
    # Convertir todas las columnas categóricas en numéricas
    categorical_columns = X_train.select_dtypes(include=['object']).columns
    
    label_encoders = {}
    for col in categorical_columns:
        le = LabelEncoder()
        X_train[col] = le.fit_transform(X_train[col])
        # Handle unseen values in test set by mapping them to -1
        X_test[col] = X_test[col].apply(lambda x: le.transform([x])[0] if x in le.classes_ else -1)
        label_encoders[col] = le
    

Primer árbol de decisión

In [239]:
# b) Fit a decision tree to the training data. No depth/pruning constraints
# are set, so the tree grows freely (the resubstitution accuracy below
# comes out as 1.0 — see the final summary cell).
tree_model = DecisionTreeClassifier(random_state=42)
    tree_model.fit(X_train, y_train)
    
    # Summary statistics: training (resubstitution) accuracy and leaf count.
    training_accuracy = tree_model.score(X_train, y_train)
    num_terminal_nodes = tree_model.get_n_leaves()
    
In [240]:
# Corregir la visualización e interpretación del árbol
    plt.figure(figsize=(20, 10))
    plot_tree(
        tree_model,
        feature_names=X_train.columns,
        class_names=tree_model.classes_.astype(str),  # Usar clases del modelo
        filled=True,
        max_depth=1  # Mostrar solo el primer nivel para interpretación
    )
    plt.title("Diagrama del árbol de decisión (Primer nivel)")
    plt.show()
    
No description has been provided for this image

Diagrama completo del árbol de decisión

In [241]:
# Generar el diagrama completo del árbol de decisión
    plt.figure(figsize=(20, 10))
    plot_tree(
        tree_model,
        feature_names=X_train.columns,
        class_names=tree_model.classes_.astype(str),  # Usar las clases del modelo
        filled=True
    )
    plt.title("Diagrama completo del árbol de decisión")
    plt.show()
    
No description has been provided for this image

Evaluación de la tasa de error vs el tamaño del árbol

In [245]:
# e) Predicción y matriz de confusión
    y_pred_test = tree_model.predict(X_test)
    # Supongamos que tienes las variables y_test (valores reales) y y_pred (valores predichos) disponibles
    # Crear la matriz de confusión
    test_conf_matrix = confusion_matrix(y_test, y_pred_test)
    test_error_rate = 1 - accuracy_score(y_test, y_pred_test)
    
    # Configurar el estilo de la matriz de confusión
    plt.figure(figsize=(8, 6))
    sns.set(font_scale=1.2)
    sns.heatmap(test_conf_matrix, annot=True, fmt="d", cmap="Blues", cbar=False)
    plt.xlabel("Predicted")
    plt.ylabel("Actual")
    plt.title("Confusion Matrix")
    plt.show()
    
No description has been provided for this image
In [246]:
# f) Determinar el tamaño óptimo del árbol mediante validación cruzada
    path = tree_model.cost_complexity_pruning_path(X_train, y_train)
    ccp_alphas, impurities = path.ccp_alphas, path.impurities
    
In [247]:
# g) Gráfico del tamaño del árbol frente a la tasa de error de validación cruzada
    train_scores = []
    test_scores = []
    for ccp_alpha in ccp_alphas:
        model = DecisionTreeClassifier(random_state=42, ccp_alpha=ccp_alpha)
        model.fit(X_train, y_train)
        train_scores.append(model.score(X_train, y_train))
        test_scores.append(cross_val_score(model, X_train, y_train, cv=5).mean())
    
In [248]:
plt.figure(figsize=(10, 6))
    plt.plot(ccp_alphas, 1 - np.array(test_scores), marker='o', label="Tasa de error CV")
    plt.xlabel("Alpha de poda (ccp_alpha)")
    plt.ylabel("Tasa de error")
    plt.title("Validación cruzada - Tasa de error vs tamaño del árbol")
    plt.legend()
    plt.grid()
    plt.show()
    
No description has been provided for this image

Creación de un mejor árbol y evaluación del mismo

In [249]:
# h) Select the alpha whose 5-fold CV accuracy is highest — equivalently,
# whose CV error rate is lowest. np.argmax on the accuracies replaces the
# original argmin(1 - scores), which did the same thing through a redundant
# float subtraction that can collapse near-tied scores.
optimal_alpha = ccp_alphas[np.argmax(test_scores)]
    
In [250]:
# i) Refit the tree with cost-complexity pruning at the CV-selected alpha.
pruned_tree_model = DecisionTreeClassifier(random_state=42, ccp_alpha=optimal_alpha)
    pruned_tree_model.fit(X_train, y_train)
    
Out[250]:
DecisionTreeClassifier(ccp_alpha=0.008748455297657429, random_state=42)
In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook.
On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.
In [251]:
# j) Training accuracy of the pruned tree, for comparison with the unpruned
# one. NOTE: despite the original comment saying "error rates", this stores
# accuracy; the error rate would be 1 - score.
pruned_training_accuracy = pruned_tree_model.score(X_train, y_train)
    
In [252]:
# k) Test error rate of the pruned tree, for comparison with the unpruned one.
pruned_y_pred_test = pruned_tree_model.predict(X_test)
    pruned_test_error_rate = 1 - accuracy_score(y_test, pruned_y_pred_test)
    
In [253]:
# Side-by-side summary (rendered as the cell output below): the unpruned
# tree scores 1.0 on training with 35 leaves, while pruning lowers both the
# training accuracy and, here, also raises the test error.
{
        "Training Accuracy (Unpruned)": training_accuracy,
        "Number of Terminal Nodes (Unpruned)": num_terminal_nodes,
        "Test Error Rate (Unpruned)": test_error_rate,
        "Training Accuracy (Pruned)": pruned_training_accuracy,
        "Test Error Rate (Pruned)": pruned_test_error_rate,
        "Optimal Alpha": optimal_alpha
    }
    
Out[253]:
{'Training Accuracy (Unpruned)': 1.0,
     'Number of Terminal Nodes (Unpruned)': 35,
     'Test Error Rate (Unpruned)': 0.11842105263157898,
     'Training Accuracy (Pruned)': 0.885,
     'Test Error Rate (Pruned)': 0.1842105263157895,
     'Optimal Alpha': 0.008748455297657429}