#!/usr/bin/env python 3 import os import json import argparse import warnings from typing import List, Tuple, Optional import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn.decomposition import PCA from sklearn.cluster import KMeans, DBSCAN, AgglomerativeClustering from sklearn.metrics import ( silhouette_score, calinski_harabasz_score, davies_bouldin_score, adjusted_rand_score, normalized_mutual_info_score, homogeneity_completeness_v_measure, ) from sklearn.preprocessing import StandardScaler import joblib import matplotlib.pyplot as plt import seaborn as sns import tensorflow as tf from tensorflow.keras.applications import MobileNetV2, EfficientNetB0 from tensorflow.keras.applications.mobilenet_v2 import preprocess_input as mobilenet_preprocess from tensorflow.keras.applications.efficientnet import preprocess_input as efficientnet_preprocess from tensorflow.keras.utils import load_img, img_to_array # ----------------------------- # Utilities # ----------------------------- def set_seed(seed: int = 42): np.random.seed(seed) tf.random.set_seed(seed) os.environ["PYTHONHASHSEED"] = str(seed) def ensure_dir(path: str): os.makedirs(path, exist_ok=True) def guess_basename(s: Optional[str]) -> Optional[str]: if s is None or (isinstance(s, float) and np.isnan(s)) or str(s).strip() == "": return None name = os.path.basename(str(s)) base, _ = os.path.splitext(name) return base if base else None def first_existing_column(df: pd.DataFrame, candidates: List[str]) -> Optional[str]: for c in candidates: if c in df.columns: return c return None def build_filename_from_row(row: pd.Series, img_ext: str = ".jpg") -> Optional[str]: """ Build the current filename in order of preference: - New_Name_With_Date (must end with extension or add one) - New_Name - Nombre_Nuevo - basename_final + ext - basename + ext """ for key in ["New_Name_With_Date", "New_Name", "Nombre_Nuevo"]: if key in row and pd.notna(row[key]) and str(row[key]).strip() != "": fname = str(row[key]).strip() if not os.path.splitext(fname)[1]: fname = fname + img_ext return fname for key in ["basename_final", "basename"]: if key in row and pd.notna(row[key]) and str(row[key]).strip() != "": return f"{row[key]}{img_ext}" # As a fallback, try Old_Name if "Old_Name" in row and pd.notna(row["Old_Name"]) and str(row["Old_Name"]).strip() != "": fname = str(row["Old_Name"]).strip() if not os.path.splitext(fname)[1]: fname = fname + img_ext return fname return None # ----------------------------- # Data loading # ----------------------------- def load_and_merge_csvs(csv_GBIF: str, csv_AV: str) -> pd.DataFrame: """ Load two CSVs and outer-merge them on basename (robust extraction). """ def read_csv_any(path: str) -> pd.DataFrame: for enc in ("utf-8", "utf-8-sig", "latin-1"): try: return pd.read_csv(path, encoding=enc) except UnicodeDecodeError: continue return pd.read_csv(path, encoding="utf-8", errors="replace") df_GBIF = read_csv_any(csv_GBIF) df_AV = read_csv_any(csv_AV) # Create basename columns # For df_GBIF try in this order a_fname_col = first_existing_column(df_GBIF, ["New_Name_With_Date", "New_Name", "Nombre_Nuevo", "Old_Name", "Nombre_Anterior", "Filename"]) if a_fname_col is None: # Try any string column str_cols = [c for c in df_GBIF.columns if df_GBIF[c].dtype == object] a_fname_col = str_cols[0] if str_cols else None df_GBIF = df_GBIF.copy() if a_fname_col: df_GBIF["basename_a"] = df_GBIF[a_fname_col].apply(guess_basename) else: df_GBIF["basename_a"] = None # For df_AV try basename columns b_base_col = first_existing_column(df_AV, ["basename", "basename_final", "basename_json", "basename_csv"]) if b_base_col is None: # Try to derive from any filename-like column b_fname_col = first_existing_column(df_AV, ["New_Name_With_Date", "New_Name", "Nombre_Nuevo", "Old_Name", "Nombre_Anterior", "Filename"]) if b_fname_col: df_AV["basename_b"] = df_AV[b_fname_col].apply(guess_basename) else: df_AV["basename_b"] = None else: df_AV["basename_b"] = df_AV[b_base_col].apply(lambda x: str(x).strip() if pd.notna(x) else None) # Outer merge merged = pd.merge(df_GBIF, df_AV, left_on="basename_a", right_on="basename_b", how="outer", suffixes=("_a", "_b")) # Create unified basename merged["basename"] = merged["basename_a"].fillna(merged["basename_b"]) return merged def attach_filenames_and_paths(df: pd.DataFrame, images_dir: str, img_ext: str = ".jpg") -> pd.DataFrame: """ Build 'filename' and 'path' columns per row based on best-available fields. """ rows = [] for _, row in df.iterrows(): fname = build_filename_from_row(row, img_ext=img_ext) if fname is None: rows.append(None) continue full_path = os.path.join(images_dir, fname) rows.append((fname, full_path)) df = df.copy() df["filename_path_tuple"] = rows df["filename"] = df["filename_path_tuple"].apply(lambda t: t[0] if t else None) df["path"] = df["filename_path_tuple"].apply(lambda t: t[1] if t else None) df.drop(columns=["filename_path_tuple"], inplace=True) # Verify file existence df["exists"] = df["path"].apply(lambda p: os.path.exists(p) if isinstance(p, str) else False) missing = (~df["exists"]).sum() if missing > 0: warnings.warn(f"{missing} files listed but not found on disk. They will be ignored.") return df[df["exists"]].reset_index(drop=True) # ----------------------------- # Embeddings # ----------------------------- def make_preprocess(backbone: str): if backbone == "mobilenet": return mobilenet_preprocess elif backbone == "efficientnet": return efficientnet_preprocess else: return mobilenet_preprocess def make_backbone_model(img_size: int, backbone: str = "mobilenet") -> tf.keras.Model: input_shape = (img_size, img_size, 3) if backbone == "mobilenet": base = MobileNetV2(include_top=False, weights="imagenet", input_shape=input_shape, pooling="avg") elif backbone == "efficientnet": base = EfficientNetB0(include_top=False, weights="imagenet", input_shape=input_shape, pooling="avg") else: base = MobileNetV2(include_top=False, weights="imagenet", input_shape=input_shape, pooling="avg") base.trainable = False return base def load_image(path: str, img_size: int) -> np.ndarray: img = load_img(path, target_size=(img_size, img_size)) arr = img_to_array(img) return arr def build_dataset(paths: List[str], img_size: int, preprocess_fn, batch_size: int = 32) -> tf.data.Dataset: path_ds = tf.data.Dataset.from_tensor_slices(paths) def _load(p): img = tf.numpy_function(lambda x: load_image(x.decode(), img_size), [p], tf.float32) img.set_shape((img_size, img_size, 3)) img = preprocess_fn(img) return img ds = path_ds.map(lambda p: _load(p), num_parallel_calls=tf.data.AUTOTUNE) ds = ds.batch(batch_size).prefetch(tf.data.AUTOTUNE) return ds def compute_embeddings(model: tf.keras.Model, ds: tf.data.Dataset) -> np.ndarray: emb = model.predict(ds, verbose=1) return emb # ----------------------------- # Clustering and evaluation # ----------------------------- def fit_reduction(train_emb: np.ndarray, n_pca: int = 50): scaler = StandardScaler() train_scaled = scaler.fit_transform(train_emb) pca = PCA(n_components=min(n_pca, train_scaled.shape[1])) train_pca = pca.fit_transform(train_scaled) return scaler, pca, train_pca def transform_reduction(emb: np.ndarray, scaler: StandardScaler, pca: PCA) -> np.ndarray: return pca.transform(scaler.transform(emb)) def fit_cluster_algo(cluster: str, n_clusters: int, train_feats: np.ndarray): if cluster == "kmeans": km = KMeans(n_clusters=n_clusters, n_init="auto", random_state=42) km.fit(train_feats) return km, km.labels_, km.cluster_centers_ elif cluster == "dbscan": db = DBSCAN(eps=0.8, min_samples=5, n_jobs=-1) db.fit(train_feats) # Compute centroids for assignment on val/test centers = [] labels = db.labels_ for c in sorted(set(labels)): if c == -1: continue centers.append(train_feats[labels == c].mean(axis=0)) centers = np.array(centers) if centers else None return db, labels, centers else: # agglomerative ag = AgglomerativeClustering(n_clusters=n_clusters) labels = ag.fit_predict(train_feats) # Compute centroids centers = [] for c in range(n_clusters): centers.append(train_feats[labels == c].mean(axis=0)) centers = np.array(centers) return ag, labels, centers def assign_to_nearest_centroid(feats: np.ndarray, centers: Optional[np.ndarray]) -> np.ndarray: if centers is None or len(centers) == 0: return np.full((feats.shape[0],), -1, dtype=int) dists = ((feats[:, None, :] - centers[None, :, :]) ** 2).sum(axis=2) return np.argmin(dists, axis=1) def internal_metrics(X: np.ndarray, labels: np.ndarray) -> dict: # Ignore noise label -1 for silhouette etc. mask = labels != -1 res = {} if mask.sum() > 1 and len(np.unique(labels[mask])) > 1: res["silhouette"] = float(silhouette_score(X[mask], labels[mask])) res["calinski_harabasz"] = float(calinski_harabasz_score(X[mask], labels[mask])) res["davies_bouldin"] = float(davies_bouldin_score(X[mask], labels[mask])) else: res["silhouette"] = None res["calinski_harabasz"] = None res["davies_bouldin"] = None return res def external_metrics(y_true: Optional[np.ndarray], y_pred: np.ndarray) -> dict: if y_true is None or pd.isna(y_true).all(): return {} # Filter where y_true is valid m = pd.notna(y_true).values if m.sum() == 0: return {} yt = y_true[m] yp = y_pred[m] res = {} try: res["ARI"] = float(adjusted_rand_score(yt, yp)) res["NMI"] = float(normalized_mutual_info_score(yt, yp)) h, c, v = homogeneity_completeness_v_measure(yt, yp) res["homogeneity"] = float(h) res["completeness"] = float(c) res["v_measure"] = float(v) except Exception: pass return res # ----------------------------- # Plotting # ----------------------------- def plot_scatter_2d(X2d: np.ndarray, labels: np.ndarray, title: str, out_path: str): plt.figure(figsize=(8, 6)) palette = sns.color_palette("tab20", n_colors=max(2, len(np.unique(labels)))) sns.scatterplot(x=X2d[:, 0], y=X2d[:, 1], hue=labels, palette=palette, s=12, linewidth=0, legend=False) plt.title(title) plt.tight_layout() plt.savefig(out_path, dpi=180) plt.close() # ----------------------------- # Main pipeline # ----------------------------- def parse_args(): parser = argparse.ArgumentParser(description="Unsupervised image clustering with pretrained CNN embeddings") parser.add_argument("--images_dir", default=r"C:\Users\sof12\Desktop\ML\Datasets\Nocciola_GBIF") parser.add_argument("--csv_GBIF", default=r"C:\Users\sof12\Desktop\ML\Datasets\Nocciola_GBIF\change_namesAV.csv") parser.add_argument("--csv_AV", default=r"C:\Users\sof12\Desktop\ML\Datasets\Nocciola_GBIF\metadatos_unidos.csv") parser.add_argument("--out_dir", default=r"C:\Users\sof12\Desktop\ML\Datasets\Nocciola_GBIF\TrainingV2") parser.add_argument("--label_col", default=None, help="Optional label column to evaluate external metrics") parser.add_argument("--img_ext", default=".jpg") parser.add_argument("--img_size", type=int, default=224) parser.add_argument("--batch_size", type=int, default=32) parser.add_argument("--seed", type=int, default=42) parser.add_argument("--sample", type=int, default=None, help="Optional max number of images to sample") parser.add_argument("--backbone", choices=["mobilenet", "efficientnet"], default="mobilenet") parser.add_argument("--cluster", choices=["kmeans", "dbscan", "agglomerative"], default="kmeans") parser.add_argument("--n_clusters", type=int, default=7) return parser.parse_args() def main(): args = parse_args() set_seed(args.seed) ensure_dir(args.out_dir) # 1) Load and merge CSVs print("Loading and merging CSVs...") merged = load_and_merge_csvs(args.csv_GBIF, args.csv_AV) # 2) Build filenames and paths based on merged info print("Resolving filenames and verifying files on disk...") merged = attach_filenames_and_paths(merged, args.images_dir, img_ext=args.img_ext) if len(merged) == 0: print("No images found. Check images_dir and CSVs.") return # Labels (optional) y_label = None if args.label_col and args.label_col in merged.columns: y_label = merged[args.label_col].astype(str) print(f"Label column '{args.label_col}' found. Will compute external metrics.") else: if args.label_col: print(f"Label column '{args.label_col}' not found. External metrics will be skipped.") # Optional sampling if args.sample is not None and args.sample < len(merged): merged = merged.sample(n=args.sample, random_state=args.seed).reset_index(drop=True) # 3) Split train/val/test print("Splitting train/val/test...") idx = np.arange(len(merged)) stratify = y_label if y_label is not None and y_label.nunique() > 1 else None idx_train, idx_tmp = train_test_split(idx, test_size=0.30, random_state=args.seed, stratify=stratify) y_tmp = y_label.iloc[idx_tmp] if y_label is not None else None stratify_tmp = y_tmp if (y_tmp is not None and y_tmp.nunique() > 1) else None idx_val, idx_test = train_test_split(idx_tmp, test_size=0.50, random_state=args.seed, stratify=stratify_tmp) df_train = merged.iloc[idx_train].reset_index(drop=True) df_val = merged.iloc[idx_val].reset_index(drop=True) df_test = merged.iloc[idx_test].reset_index(drop=True) print(f"Train: {len(df_train)} | Val: {len(df_val)} | Test: {len(df_test)}") # 4) Embeddings print("Building embedding model...") preprocess_fn = make_preprocess(args.backbone) model = make_backbone_model(args.img_size, backbone=args.backbone) print("Computing embeddings...") ds_train = build_dataset(df_train["path"].tolist(), args.img_size, preprocess_fn, args.batch_size) ds_val = build_dataset(df_val["path"].tolist(), args.img_size, preprocess_fn, args.batch_size) ds_test = build_dataset(df_test["path"].tolist(), args.img_size, preprocess_fn, args.batch_size) emb_train = compute_embeddings(model, ds_train) emb_val = compute_embeddings(model, ds_val) emb_test = compute_embeddings(model, ds_test) # 5) Reduction print("Fitting PCA reduction (50D for clustering, 2D for plots)...") scaler, pca50, train_50 = fit_reduction(emb_train, n_pca=50) val_50 = transform_reduction(emb_val, scaler, pca50) test_50 = transform_reduction(emb_test, scaler, pca50) pca2 = PCA(n_components=2).fit(scaler.transform(emb_train)) train_2d = pca2.transform(scaler.transform(emb_train)) val_2d = pca2.transform(scaler.transform(emb_val)) test_2d = pca2.transform(scaler.transform(emb_test)) # 6) Clustering print(f"Clustering with {args.cluster}...") cluster_model, y_train_clusters, centers = fit_cluster_algo(args.cluster, args.n_clusters, train_50) if args.cluster == "kmeans": y_val_clusters = cluster_model.predict(val_50) y_test_clusters = cluster_model.predict(test_50) else: # Assign by nearest centroid computed on train y_val_clusters = assign_to_nearest_centroid(val_50, centers) y_test_clusters = assign_to_nearest_centroid(test_50, centers) # 7) Metrics print("Computing internal metrics...") train_internal = internal_metrics(train_50, y_train_clusters) val_internal = internal_metrics(val_50, y_val_clusters) test_internal = internal_metrics(test_50, y_test_clusters) if args.label_col and args.label_col in merged.columns: print("Computing external metrics vs labels...") y_train_true = df_train[args.label_col].astype(str) y_val_true = df_val[args.label_col].astype(str) y_test_true = df_test[args.label_col].astype(str) train_external = external_metrics(y_train_true, y_train_clusters) val_external = external_metrics(y_val_true, y_val_clusters) test_external = external_metrics(y_test_true, y_test_clusters) else: train_external = val_external = test_external = {} # 8) Save outputs print("Saving outputs...") ensure_dir(args.out_dir) # Save assignments def save_split_csv(df_split, emb_split, y_clusters, split_name): out_csv = os.path.join(args.out_dir, f"{split_name}_assignments.csv") out_npy = os.path.join(args.out_dir, f"{split_name}_embeddings.npy") df_out = df_split.copy() df_out["cluster"] = y_clusters df_out.to_csv(out_csv, index=False, encoding="utf-8") np.save(out_npy, emb_split) save_split_csv(df_train, emb_train, y_train_clusters, "train") save_split_csv(df_val, emb_val, y_val_clusters, "val") save_split_csv(df_test, emb_test, y_test_clusters, "test") # Save models joblib.dump(scaler, os.path.join(args.out_dir, "scaler.joblib")) joblib.dump(pca50, os.path.join(args.out_dir, "pca50.joblib")) joblib.dump(pca2, os.path.join(args.out_dir, "pca2.joblib")) joblib.dump(cluster_model, os.path.join(args.out_dir, f"{args.cluster}.joblib")) # Plots plot_scatter_2d(train_2d, y_train_clusters, f"Train clusters ({args.cluster})", os.path.join(args.out_dir, "train_clusters_2d.png")) plot_scatter_2d(val_2d, y_val_clusters, f"Val clusters ({args.cluster})", os.path.join(args.out_dir, "val_clusters_2d.png")) plot_scatter_2d(test_2d, y_test_clusters, f"Test clusters ({args.cluster})", os.path.join(args.out_dir, "test_clusters_2d.png")) if args.label_col and args.label_col in merged.columns: # Color by labels for comparison plot_scatter_2d(train_2d, df_train[args.label_col].astype(str).values, "Train by labels", os.path.join(args.out_dir, "train_labels_2d.png")) plot_scatter_2d(val_2d, df_val[args.label_col].astype(str).values, "Val by labels", os.path.join(args.out_dir, "val_labels_2d.png")) plot_scatter_2d(test_2d, df_test[args.label_col].astype(str).values, "Test by labels", os.path.join(args.out_dir, "test_labels_2d.png")) # Summary JSON summary = { "counts": {"train": len(df_train), "val": len(df_val), "test": len(df_test)}, "cluster": args.cluster, "n_clusters": args.n_clusters, "backbone": args.backbone, "img_size": args.img_size, "internal_metrics": { "train": train_internal, "val": val_internal, "test": test_internal, }, "external_metrics": { "train": train_external, "val": val_external, "test": test_external, }, } with open(os.path.join(args.out_dir, "summary.json"), "w", encoding="utf-8") as f: json.dump(summary, f, indent=2, ensure_ascii=False) print("Done. Results saved to:", args.out_dir) if __name__ == "__main__": main()