2. How to resume training. The key code for resuming training:
 args.syncnet_checkpoint_path='weight\syncnet\ex\syncnet_checkpoint_384_20_000021500_2024-11-18.pth'
args.checkpoint_path='weight\wav\ex_wav2lip_margin\gen_last_wav128_1e4.pth'  # resume training
 args.data_root='preprocessed_root/data_train'
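For reference, resuming in this kind of Wav2Lip-style script comes down to restoring the generator (and, if saved, the optimizer and step counters) from checkpoint_path before the training loop starts. The sketch below is a minimal illustration of that pattern; load_checkpoint, the 'state_dict'/'optimizer'/'global_step' keys and the variable names are assumptions, not necessarily the exact code in 2.py.

import torch

def load_checkpoint(path, model, optimizer=None, reset_optimizer=False, device='cuda'):
    # Hypothetical helper: restore model (and optionally optimizer) state.
    print('Load checkpoint from: {}'.format(path))
    checkpoint = torch.load(path, map_location=device)
    # Wav2Lip-style checkpoints usually keep the weights under 'state_dict'.
    state_dict = checkpoint.get('state_dict', checkpoint)
    model.load_state_dict(state_dict)
    if optimizer is not None and not reset_optimizer and 'optimizer' in checkpoint:
        optimizer.load_state_dict(checkpoint['optimizer'])
    # Resume the global counters if they were saved with the checkpoint.
    global_step = checkpoint.get('global_step', 0)
    global_epoch = checkpoint.get('global_epoch', 0)
    return model, global_step, global_epoch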
 
 Load checkpoint from: weight\syncnet\ex\syncnet_checkpoint_384_20_000021500_2024-11-18.pth
 G:\wav2lip384_my2-main\2.py:684: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytor ... md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.
 checkpoint = torch.load(checkpoint_path)
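The FutureWarning above comes from calling torch.load without weights_only. If the checkpoint contains only tensors and plain containers, passing weights_only=True silences the warning and avoids unpickling arbitrary objects; this is a suggestion under that assumption (custom classes stored in the checkpoint would first need to be allowlisted via torch.serialization.add_safe_globals):

# Safer variant, assuming the checkpoint holds only tensors/dicts:
checkpoint = torch.load(checkpoint_path, map_location='cuda', weights_only=True)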
 total wav2lip trainable params 159087803
 total disc trainable params 43082817
 total syncnet  params 65054464
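The three totals above are the usual PyTorch parameter counts. A minimal sketch of how such numbers are typically produced, assuming wav2lip, disc and syncnet are the three nn.Module instances used by the script:

def count_params(model, trainable_only=True):
    # Sum element counts over (optionally only trainable) parameters.
    return sum(p.numel() for p in model.parameters()
               if p.requires_grad or not trainable_only)

print('total wav2lip trainable params', count_params(wav2lip))
print('total disc trainable params', count_params(disc))
print('total syncnet  params', count_params(syncnet, trainable_only=False))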
 Starting Epoch: 0
 G:\wav2lip384_my2-main\2.py:406: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.
 with torch.cuda.amp.autocast(enabled=False):
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
 G:\wav2lip384_my2-main\2.py:429: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.
 with torch.cuda.amp.autocast(enabled=False):
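Both autocast FutureWarnings point at the same API change; replacing the deprecated call with the torch.amp form removes them without changing behaviour. A sketch, assuming the wrapped forward passes stay exactly as in 2.py:

# Deprecated (emits the FutureWarning above):
#   with torch.cuda.amp.autocast(enabled=False):
#       ...
# Current API:
with torch.amp.autocast('cuda', enabled=False):
    ...  # forward pass unchanged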
 Step 1 | L1: 2.40438 | Sync_wt: 0.0030 Sync: 0.421151, Percep: 0.703886 | Fake: 0.682866, Real: 0.709179 | Load: 0.208103, Train: 3.14551
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
 ...
 Starting Epoch: 44
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
 Step 221 | L1: 0.408767 | Sync_wt: 0.0300 Sync: 0.973213, Percep: 0.224766 | Fake: 1.93283, Real: 0.0268605 | Load: 0.17092, Train: 2.21316
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
 Step 222 | L1: 0.477655 | Sync_wt: 0.0300 Sync: 1.43658, Percep: 2.60373 | Fake: 0.971099, Real: 0.876769 | Load: 0.187253, Train: 1.71426
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
 Step 223 | L1: 0.491983 | Sync_wt: 0.0300 Sync: 1.46766, Percep: 3.08503 | Fake: 0.654007, Real: 0.707941 | Load: 0.183522, Train: 1.88717
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
 Step 224 | L1: 0.503687 | Sync_wt: 0.0300 Sync: 1.37013, Percep: 3.26725 | Fake: 0.496582, Real: 0.618142 | Load: 0.170331, Train: 1.87882
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
 Step 225 | L1: 0.506882 | Sync_wt: 0.0300 Sync: 1.16493, Percep: 3.3811 | Fake: 0.402043, Real: 0.513971 | Load: 0.169339, Train: 1.88462
 torch.Size([1, 3, 5, 384, 384])
 torch.Size([1, 3, 5, 192, 384])
 
 ...
 Starting Epoch: 473
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
 Step 2366 | L1: 0.231807 | Sync_wt: 0.0300 Sync: 1.22511, Percep: 1.69847 | Fake: 0.206287, Real: 0.589591 | Load: 0.176917, Train: 2.5686
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
 Step 2367 | L1: 0.205227 | Sync_wt: 0.0300 Sync: 1.29311, Percep: 2.06025 | Fake: 0.435907, Real: 0.298213 | Load: 0.199886, Train: 1.97014
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
 Step 2368 | L1: 0.220921 | Sync_wt: 0.0300 Sync: 0.943628, Percep: 2.31971 | Fake: 0.321806, Real: 0.293943 | Load: 0.19954, Train: 2.16911
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
 Step 2369 | L1: 0.22782 | Sync_wt: 0.0300 Sync: 1.16881, Percep: 2.55586 | Fake: 0.251937, Real: 0.311174 | Load: 0.177899, Train: 2.17077
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
 Step 2370 | L1: 0.250215 | Sync_wt: 0.0300 Sync: 1.4198, Percep: 2.47995 | Fake: 0.226859, Real: 0.291268 | Load: 0.173385, Train: 2.11635
 torch.Size([1, 3, 5, 384, 384])
 torch.Size([1, 3, 5, 192, 384])
 
 
 Starting Epoch: 569
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
 Step 2846 | L1: 0.225897 | Sync_wt: 0.0300 Sync: 0.53349, Percep: 2.02565 | Fake: 0.151527, Real: 0.0176994 | Load: 0.176329, Train: 2.2614
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
 Step 2847 | L1: 0.23254 | Sync_wt: 0.0300 Sync: 0.889151, Percep: 2.03761 | Fake: 0.186591, Real: 0.0162578 | Load: 0.190352, Train: 1.75833
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
 Step 2848 | L1: 0.251053 | Sync_wt: 0.0300 Sync: 0.725673, Percep: 2.00203 | Fake: 0.228083, Real: 0.015268 | Load: 0.177649, Train: 1.92353
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
 Step 2849 | L1: 0.251991 | Sync_wt: 0.0300 Sync: 0.698621, Percep: 2.0589 | Fake: 0.241014, Real: 0.0180526 | Load: 0.179897, Train: 1.88287
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
 Step 2850 | L1: 0.253426 | Sync_wt: 0.0300 Sync: 0.615678, Percep: 2.68241 | Fake: 0.194633, Real: 0.132936 | Load: 0.185898, Train: 1.98481
 torch.Size([1, 3, 5, 384, 384])
 torch.Size([1, 3, 5, 192, 384])
 
 Starting Epoch: 758
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
 Step 3791 | L1: 0.206628 | Sync_wt: 0.0300 Sync: 0.935533, Percep: 5.11073 | Fake: 0.00862205, Real: 0.00432247 | Load: 0.16981, Train: 2.25067
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
 Step 3792 | L1: 0.250087 | Sync_wt: 0.0300 Sync: 0.695988, Percep: 4.94466 | Fake: 0.00982878, Real: 0.00432828 | Load: 0.212077, Train: 1.79654
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
 Step 3793 | L1: 0.232677 | Sync_wt: 0.0300 Sync: 0.613124, Percep: 4.96901 | Fake: 0.00963348, Real: 0.00433 | Load: 0.1809, Train: 1.96615
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
 Step 3794 | L1: 0.23422 | Sync_wt: 0.0300 Sync: 0.582902, Percep: 5.00178 | Fake: 0.00941184, Real: 0.00435563 | Load: 0.190866, Train: 1.94068
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
 Step 3795 | L1: 0.240213 | Sync_wt: 0.0300 Sync: 0.695015, Percep: 5.009 | Fake: 0.00937021, Real: 0.00435027 | Load: 0.182902, Train: 1.92828
 torch.Size([1, 3, 5, 384, 384])
 torch.Size([1, 3, 5, 192, 384])
 
 Starting Epoch: 929
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
 Step 4646 | L1: 0.235182 | Sync_wt: 0.0300 Sync: 1.34508, Percep: 1.38635 | Fake: 0.795776, Real: 0.277558 | Load: 0.165917, Train: 2.22869
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
 Step 4647 | L1: 0.228566 | Sync_wt: 0.0300 Sync: 0.868533, Percep: 1.33973 | Fake: 0.59392, Real: 0.170588 | Load: 0.186512, Train: 1.7112
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
 Step 4648 | L1: 0.226583 | Sync_wt: 0.0300 Sync: 1.14048, Percep: 1.73176 | Fake: 0.458232, Real: 0.278667 | Load: 0.176913, Train: 1.89952
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
 Step 4649 | L1: 0.229176 | Sync_wt: 0.0300 Sync: 0.885194, Percep: 2.20822 | Fake: 0.353763, Real: 0.230063 | Load: 0.162894, Train: 1.88558
 torch.Size([2, 3, 5, 384, 384])
 torch.Size([2, 3, 5, 192, 384])
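A note on reading the log: the two printed shapes are presumably the full-face ground-truth window (batch, 3 colour channels, a 5-frame window, 384, 384) and the lower half-face crop (batch, 3, 5, 192, 384); the batch size drops to 1 on the last, incomplete batch of an epoch. In each step line, L1 is the reconstruction loss, Sync is the SyncNet lip-sync loss weighted by Sync_wt (which rises from 0.0030 at step 1 to 0.0300 later in training), Percep is the perceptual term from the visual-quality discriminator, Fake/Real are the discriminator's own losses, and Load/Train look like per-step data-loading and compute times. In the standard Wav2Lip recipe the generator objective mixes these terms roughly as sketched below; the exact weights and variable names here are assumptions, and 2.py may differ:

# Sketch of a Wav2Lip-style generator objective (weights are assumptions):
sync_wt = 0.03            # ramped up from 0.003 early in training
disc_wt = 0.07            # weight on the perceptual (GAN) term
loss = (sync_wt * sync_loss
        + disc_wt * perceptual_loss
        + (1.0 - sync_wt - disc_wt) * l1_loss)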
 
 