Problem / error message
----> 5 model = classifier(flags.aug_kwargs)
      6 model.load_state_dict(torch.load(model_path))

    725             result = self._slow_forward(*input, **kwargs)
    726         else:
--> 727             result = self.forward(*input, **kwargs)
    728         for hook in itertools.chain(
    729                 _global_forward_hooks.values(),

TypeError: forward() missing 1 required positional argument: 'targets'
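The traceback shows that nn.Module.__call__ passes the single argument flags.aug_kwargs straight through to Classifier.forward(self, image, targets) (defined in the code below), so targets is never supplied. A minimal sketch that reproduces the same error using only standard PyTorch (TwoArgModule is a made-up stand-in, not part of the code below):

Python
import torch
import torch.nn as nn

class TwoArgModule(nn.Module):
    # Stand-in for Classifier: forward() requires two positional arguments.
    def forward(self, image, targets):
        return image

m = TwoArgModule()
m(torch.zeros(1))  # TypeError: forward() missing 1 required positional argument: 'targets'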
Relevant source code
Python
@dataclass
class Flags:
    # General
    debug: bool = True
    outdir: str = "results/det"
    device: str = "cuda:0"
    # Data config
    imgdir_name: str = "vinbigdata-chest-xray-resized-png-256x256"
    # split_mode: str = "all_train"
    seed: int = 111
    target_fold: int = 0  # 0~4
    label_smoothing: float = 0.0
    # Model config
    model_name: str = "tf_inception_v3"
    model_mode: str = "normal"  # normal, cnn_fixed supported
    # Training config
    epoch: int = 20
    batchsize: int = 8
    valid_batchsize: int = 16
    num_workers: int = 4
    snapshot_freq: int = 5
    ema_decay: float = 0.999  # negative value is to inactivate ema.
    scheduler_type: str = ""
    scheduler_kwargs: Dict[str, Any] = field(default_factory=lambda: {})
    scheduler_trigger: List[Union[int, str]] = field(default_factory=lambda: [1, "iteration"])
    aug_kwargs: Dict[str, Dict[str, Any]] = field(default_factory=lambda: {})
    mixup_prob: float = -1.0  # Apply mixup augmentation when positive value is set.

    def update(self, param_dict: Dict) -> "Flags":
        # Overwrite by `param_dict`
        for key, value in param_dict.items():
            if not hasattr(self, key):
                raise ValueError(f"[ERROR] Unexpected key for flag = {key}")
            setattr(self, key, value)
        return self


flags_dict = {
    "debug": False,  # Change to True for fast debug run!
    "outdir": "results/tmp_debug",
    # Data
    "imgdir_name": "vinbigdata-chest-xray-resized-png-256x256",
    # Model
    "model_name": "tf_inception_v3",  # "resnet18",
    # Training
    "num_workers": 4,
    "epoch": 15,
    "batchsize": 8,
    "scheduler_type": "CosineAnnealingWarmRestarts",
    "scheduler_kwargs": {"T_0": 28125},  # 15000 * 15 epoch // (batchsize=8)
    "scheduler_trigger": [1, "iteration"],
    "aug_kwargs": {
        "HorizontalFlip": {"p": 0.5},
        "ShiftScaleRotate": {"scale_limit": 0.15, "rotate_limit": 10, "p": 0.5},
        "RandomBrightnessContrast": {"p": 0.5},
        "CoarseDropout": {"max_holes": 8, "max_height": 25, "max_width": 25, "p": 0.5},
        "Blur": {"blur_limit": [3, 7], "p": 0.5},
        "Downscale": {"scale_min": 0.25, "scale_max": 0.9, "p": 0.3},
        "RandomGamma": {"gamma_limit": [80, 120], "p": 0.6},
    }
}

# args = parse()
print("torch", torch.__version__)
flags = Flags().update(flags_dict)
print("flags", flags)
debug = flags.debug
outdir = Path(flags.outdir)
os.makedirs(str(outdir), exist_ok=True)
flags_dict = dataclasses.asdict(flags)
save_yaml(str(outdir / "flags.yaml"), flags_dict)

# --- Read data ---
inputdir = Path("/kaggle/input")
datadir = inputdir / "vinbigdata-chest-xray-abnormalities-detection"
imgdir = inputdir / flags.imgdir_name

# Read in the data CSV files
train = pd.read_csv(datadir / "train.csv")


class CNNFixedPredictor(nn.Module):
    def __init__(self, cnn: nn.Module, num_classes: int = 2):
        super(CNNFixedPredictor, self).__init__()
        self.cnn = cnn
        self.lin = Linear(cnn.num_features, num_classes)
        print("cnn.num_features", cnn.num_features)

        # We do not learn CNN parameters.
        # https://pytorch.org/tutorials/beginner/finetuning_torchvision_models_tutorial.html
        for param in self.cnn.parameters():
            param.requires_grad = False

    def forward(self, x):
        feat = self.cnn(x)
        return self.lin(feat)


def build_predictor(model_name: str, model_mode: str = "normal"):
    if model_mode == "normal":
        # normal configuration. train all parameters.
        return timm.create_model(model_name, pretrained=True, num_classes=2, in_chans=3)
    elif model_mode == "cnn_fixed":
        # fixed CNN feature extractor: only the final linear layer is trained.
        # https://rwightman.github.io/pytorch-image-models/feature_extraction/
        timm_model = timm.create_model(model_name, pretrained=True, num_classes=0, in_chans=3)
        return CNNFixedPredictor(timm_model, num_classes=2)
    else:
        raise ValueError(f"[ERROR] Unexpected value model_mode={model_mode}")


class Classifier(nn.Module):
    """two class classification"""

    def __init__(self, predictor, lossfun=cross_entropy_with_logits):
        super().__init__()
        self.predictor = predictor
        self.lossfun = lossfun
        self.prefix = ""

    def forward(self, image, targets):
        outputs = self.predictor(image)
        loss = self.lossfun(outputs, targets)
        metrics = {
            f"{self.prefix}loss": loss.item(),
            f"{self.prefix}acc": accuracy_with_logits(outputs, targets).item()
        }
        ppe.reporting.report(metrics, self)
        return loss, metrics

    def predict(self, data_loader):
        pred = self.predict_proba(data_loader)
        label = torch.argmax(pred, dim=1)
        return label

    def predict_proba(self, data_loader):
        device: torch.device = next(self.parameters()).device
        y_list = []
        self.eval()
        with torch.no_grad():
            for batch in data_loader:
                if isinstance(batch, (tuple, list)):
                    # Assumes first argument is "image"
                    batch = batch[0].to(device)
                else:
                    batch = batch.to(device)
                y = self.predictor(batch)
                y = torch.softmax(y, dim=-1)
                y_list.append(y)
        pred = torch.cat(y_list)
        return pred


def create_trainer(model, optimizer, device) -> Engine:
    model.to(device)

    def update_fn(engine, batch):
        model.train()
        optimizer.zero_grad()
        loss, metrics = model(*[elem.to(device) for elem in batch])
        loss.backward()
        optimizer.step()
        return metrics

    trainer = Engine(update_fn)
    return trainer


skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=flags.seed)
# skf.get_n_splits(None, None)
y = np.array([int(len(d["annotations"]) > 0) for d in dataset_dicts])
split_inds = list(skf.split(dataset_dicts, y))
train_inds, valid_inds = split_inds[flags.target_fold]  # 0th fold

train_dataset = VinbigdataTwoClassDataset(
    [dataset_dicts[i] for i in train_inds],
    image_transform=Transform(flags.aug_kwargs),
    mixup_prob=flags.mixup_prob,
    label_smoothing=flags.label_smoothing,
)
valid_dataset = VinbigdataTwoClassDataset([dataset_dicts[i] for i in valid_inds])

train_loader = DataLoader(
    train_dataset,
    batch_size=flags.batchsize,
    num_workers=flags.num_workers,
    shuffle=True,
    pin_memory=True,
)
valid_loader = DataLoader(
    valid_dataset,
    batch_size=flags.valid_batchsize,
    num_workers=flags.num_workers,
    shuffle=False,
    pin_memory=True,
)

device = torch.device(flags.device)
predictor = build_predictor(model_name=flags.model_name, model_mode=flags.model_mode)
classifier = Classifier(predictor)
model = classifier
optimizer = optim.Adam([param for param in model.parameters() if param.requires_grad], lr=1e-3)

# Train setup
trainer = create_trainer(model, optimizer, device)
ema = EMA(predictor, decay=flags.ema_decay)


def eval_func(*batch):
    loss, metrics = model(*[elem.to(device) for elem in batch])
    # HACKING: report ema value with prefix.
    if flags.ema_decay > 0:
        classifier.prefix = "ema_"
        ema.assign()
        loss, metrics = model(*[elem.to(device) for elem in batch])
        ema.resume()
        classifier.prefix = ""


valid_evaluator = E.Evaluator(
    valid_loader, model, progress_bar=False, eval_func=eval_func, device=device
)

# log_trigger = (10 if debug else 1000, "iteration")
log_trigger = (1, "epoch")
log_report = E.LogReport(trigger=log_trigger)

extensions = [
    log_report,
    E.ProgressBarNotebook(update_interval=10 if debug else 100),  # Show progress bar during training
    E.PrintReportNotebook(),
    E.FailOnNonNumber(),
]
epoch = flags.epoch
models = {"main": model}
optimizers = {"main": optimizer}
manager = IgniteExtensionsManager(
    trainer, models, optimizers, epoch,
    extensions=extensions,
    out_dir=str(outdir),
)
# Run evaluation for valid dataset in each epoch.
manager.extend(valid_evaluator)
# Save predictor.pt every epoch
manager.extend(
    E.snapshot_object(predictor, "predictor.pt"), trigger=(flags.snapshot_freq, "epoch")
)
manager.extend(E.observe_lr(optimizer=optimizer), trigger=log_trigger)
if flags.ema_decay > 0:
    # Exponential moving average
    manager.extend(lambda manager: ema(), trigger=(1, "iteration"))

    def save_ema_model(manager):
        ema.assign()
        # torch.save(predictor.state_dict(), outdir / "predictor_ema.pt")
        torch.save(predictor.state_dict(), "/kaggle/working/predictor_ema.pt")
        ema.resume()

    manager.extend(save_ema_model, trigger=(flags.snapshot_freq, "epoch"))

_ = trainer.run(train_loader, max_epochs=epoch)

torch.save(predictor.state_dict(), "/kaggle/working/predictor_tf_inception_v3.pt")

df = log_report.to_dataframe()
df.to_csv("/kaggle/working/log.csv", index=False)

#################################### end of added code ####################################

# Load the model
model_path = './predictor_last.pt'
model = classifier(flags.aug_kwargs)
model.load_state_dict(torch.load(model_path))
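For reference, a minimal sketch of a load path that avoids calling the trained classifier instance. classifier(flags.aug_kwargs) invokes Classifier.forward(image, targets) with only one argument, which is exactly the TypeError above; aug_kwargs is also consumed by Transform for the dataset, not by the model. The sketch assumes ./predictor_last.pt holds a predictor state dict (the training code above only ever saves predictor state dicts, e.g. predictor.pt and /kaggle/working/predictor_tf_inception_v3.pt); build_predictor, Classifier, flags, and device are the names defined above:

Python
# Sketch: rebuild the architecture, then load the saved predictor weights into it.
model_path = "./predictor_last.pt"
predictor = build_predictor(model_name=flags.model_name, model_mode=flags.model_mode)
predictor.load_state_dict(torch.load(model_path, map_location=device))
# Wrap with Classifier (the class) instead of calling classifier (the instance).
model = Classifier(predictor)
model.to(device)
model.eval()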