Remove some ignored errors (pytorch#1605)
* Removed ignored errors and warnings

* autopep8 fix

* Fixed import errors

* autopep8 fix

* More fixes

* Fixed code-formatting tools call order

* autopep8 fix

* Fix isort

* Fixed code-style action

* Updated isort to >5.0.0 and reformatted files

* Fixed isort version and reverted code-style install

Co-authored-by: vfdev-5 <vfdev-5@users.noreply.github.com>
vfdev-5 and vfdev-5 committed Feb 1, 2021
1 parent f55ca55 commit 0bb3c6c
Showing 69 changed files with 270 additions and 241 deletions.
5 changes: 3 additions & 2 deletions .github/workflows/code-style.yml
@@ -27,10 +27,11 @@ jobs:
with:
python-version: "3.8"
- run: |
- python -m pip install autopep8 "black==19.10b0" "isort==4.3.21"
- isort -rc .
+ python -m pip install autopep8 "black==19.10b0" "isort==5.7.0"
autopep8 --recursive --in-place --aggressive --aggressive .
black .
+ isort .
- name: Commit and push changes
uses: stefanzweifel/git-auto-commit-action@v4
with:
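For reference: isort 5 dropped the -rc/--recursive flag (recursing into directories is now the default), so the old "isort -rc ." becomes a plain "isort .", and the commit also moves it after black to fix the tool call order. A minimal local equivalent of the step above, assuming the versions pinned in the workflow, would be roughly:

    python -m pip install autopep8 "black==19.10b0" "isort==5.7.0"
    # formatter order as fixed by this commit: autopep8, then black, then isort
    autopep8 --recursive --in-place --aggressive --aggressive .
    black .
    isort .  # no -rc flag: isort >= 5 recurses by default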
4 changes: 2 additions & 2 deletions .github/workflows/unit-tests.yml
@@ -79,10 +79,10 @@ jobs:
- name: Check code formatting
shell: bash -l {0}
run: |
- pip install flake8 "black==19.10b0" "isort==4.3.21"
+ pip install flake8 "black==19.10b0" "isort==5.7.0"
flake8 ignite/ tests/ examples/
black --check .
- isort -rc -c .
+ isort -c .
- name: Run Mypy
shell: bash -l {0}
2 changes: 1 addition & 1 deletion CONTRIBUTING.md
@@ -65,7 +65,7 @@ git clone https://github.com/pytorch/ignite.git
cd ignite
python setup.py develop
pip install -r requirements-dev.txt
- pip install flake8 "black==19.10b0" "isort==4.3.21" mypy
+ pip install flake8 "black==19.10b0" "isort==5.7.0" mypy
```

### Code development
15 changes: 6 additions & 9 deletions examples/contrib/cifar10/main.py
@@ -1,23 +1,20 @@
from pathlib import Path
from datetime import datetime
from pathlib import Path

import fire

import torch
import torch.nn as nn
import torch.optim as optim
import utils

import ignite
import ignite.distributed as idist
from ignite.engine import Events, Engine, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss
from ignite.handlers import Checkpoint, DiskSaver
from ignite.utils import manual_seed, setup_logger

from ignite.contrib.engines import common
from ignite.contrib.handlers import PiecewiseLinear

import utils
from ignite.engine import Engine, Events, create_supervised_evaluator
from ignite.handlers import Checkpoint, DiskSaver
from ignite.metrics import Accuracy, Loss
from ignite.utils import manual_seed, setup_logger


def training(local_rank, config):
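The example files below change in the same way: isort 5 regroups imports into standard-library, third-party, and first-party (ignite) blocks, places plain "import x" lines before "from x import y" lines inside each block, and alphabetizes the names within each from-import. A rough way to preview that regrouping on a single file is sketched below; the flags are illustrative, and the repository's real isort settings (line length, known first-party packages, etc.) may differ:

    python -m pip install "isort==5.7.0"
    # --diff prints the proposed reordering instead of rewriting the file;
    # -p marks ignite as a first-party package so it keeps its own block
    isort --diff -p ignite examples/contrib/cifar10/main.py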
5 changes: 2 additions & 3 deletions examples/contrib/cifar10/utils.py
@@ -1,8 +1,7 @@
import os

- from torchvision import models
- from torchvision import datasets
- from torchvision.transforms import Compose, ToTensor, Normalize, Pad, RandomCrop, RandomHorizontalFlip
+ from torchvision import datasets, models
+ from torchvision.transforms import Compose, Normalize, Pad, RandomCrop, RandomHorizontalFlip, ToTensor

train_transform = Compose(
[
11 changes: 4 additions & 7 deletions examples/contrib/cifar100_amp_benchmark/benchmark_fp32.py
@@ -1,17 +1,14 @@
import fire

import torch
from torch.nn import CrossEntropyLoss
from torch.optim import SGD

from torchvision.models import wide_resnet50_2
from utils import get_train_eval_loaders

from ignite.engine import Events, Engine, create_supervised_evaluator, convert_tensor
from ignite.metrics import Accuracy, Loss
from ignite.handlers import Timer
from ignite.contrib.handlers import ProgressBar

from utils import get_train_eval_loaders
from ignite.engine import Engine, Events, convert_tensor, create_supervised_evaluator
from ignite.handlers import Timer
from ignite.metrics import Accuracy, Loss


def main(dataset_path, batch_size=256, max_epochs=10):
14 changes: 5 additions & 9 deletions examples/contrib/cifar100_amp_benchmark/benchmark_nvidia_apex.py
@@ -1,19 +1,15 @@
import fire

import torch
from apex import amp
from torch.nn import CrossEntropyLoss
from torch.optim import SGD

from torchvision.models import wide_resnet50_2
from utils import get_train_eval_loaders

from apex import amp

from ignite.engine import Events, Engine, create_supervised_evaluator, convert_tensor
from ignite.metrics import Accuracy, Loss
from ignite.handlers import Timer
from ignite.contrib.handlers import ProgressBar

from utils import get_train_eval_loaders
from ignite.engine import Engine, Events, convert_tensor, create_supervised_evaluator
from ignite.handlers import Timer
from ignite.metrics import Accuracy, Loss


def main(dataset_path, batch_size=256, max_epochs=10, opt="O1"):
@@ -1,20 +1,15 @@
import fire

import torch
from torch.cuda.amp import GradScaler, autocast
from torch.nn import CrossEntropyLoss
from torch.optim import SGD

# Creates a GradScaler once at the beginning of training.
from torch.cuda.amp import GradScaler, autocast

from torchvision.models import wide_resnet50_2
from utils import get_train_eval_loaders

from ignite.engine import Events, Engine, create_supervised_evaluator, convert_tensor
from ignite.metrics import Accuracy, Loss
from ignite.handlers import Timer
from ignite.contrib.handlers import ProgressBar

from utils import get_train_eval_loaders
from ignite.engine import Engine, Events, convert_tensor, create_supervised_evaluator
from ignite.handlers import Timer
from ignite.metrics import Accuracy, Loss


def main(dataset_path, batch_size=256, max_epochs=10):
6 changes: 2 additions & 4 deletions examples/contrib/cifar100_amp_benchmark/utils.py
@@ -1,10 +1,8 @@
import random

from torch.utils.data import DataLoader, Subset
from torchvision.datasets.cifar import CIFAR100
from torchvision.transforms import Compose, RandomCrop, Pad, RandomHorizontalFlip
from torchvision.transforms import ToTensor, Normalize, RandomErasing

from torch.utils.data import Subset, DataLoader
from torchvision.transforms import Compose, Normalize, Pad, RandomCrop, RandomErasing, RandomHorizontalFlip, ToTensor


def get_train_eval_loaders(path, batch_size=256):
2 changes: 1 addition & 1 deletion examples/contrib/cifar10_qat/main.py
@@ -5,10 +5,10 @@
import torch
import torch.nn as nn
import torch.optim as optim
+ import utils

import ignite
import ignite.distributed as idist
- import utils
from ignite.contrib.engines import common
from ignite.contrib.handlers import PiecewiseLinear
from ignite.engine import Engine, Events, create_supervised_evaluator
3 changes: 1 addition & 2 deletions examples/contrib/cifar10_qat/utils.py
@@ -5,11 +5,10 @@
import torch
import torch.nn as nn
import torchvision
+ from pact import PACTReLU
from torchvision import datasets, models
from torchvision.transforms import Compose, Normalize, Pad, RandomCrop, RandomHorizontalFlip, ToTensor

- from pact import PACTReLU

train_transform = Compose(
[
Pad(4),
16 changes: 12 additions & 4 deletions examples/contrib/mnist/mnist_with_clearml_logger.py
@@ -19,10 +19,18 @@
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, ToTensor, Normalize

from ignite.contrib.handlers.clearml_logger import *
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from torchvision.transforms import Compose, Normalize, ToTensor

from ignite.contrib.handlers.clearml_logger import (
ClearMLLogger,
ClearMLSaver,
GradsHistHandler,
GradsScalarHandler,
WeightsHistHandler,
WeightsScalarHandler,
global_step_from_engine,
)
from ignite.engine import Events, create_supervised_evaluator, create_supervised_trainer
from ignite.handlers import Checkpoint
from ignite.metrics import Accuracy, Loss
from ignite.utils import setup_logger
16 changes: 11 additions & 5 deletions examples/contrib/mnist/mnist_with_neptune_logger.py
@@ -25,12 +25,18 @@
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, ToTensor, Normalize

from ignite.contrib.handlers.neptune_logger import *
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss
from torchvision.transforms import Compose, Normalize, ToTensor

from ignite.contrib.handlers.neptune_logger import (
GradsScalarHandler,
NeptuneLogger,
NeptuneSaver,
WeightsScalarHandler,
global_step_from_engine,
)
from ignite.engine import Events, create_supervised_evaluator, create_supervised_trainer
from ignite.handlers import Checkpoint
from ignite.metrics import Accuracy, Loss
from ignite.utils import setup_logger


17 changes: 12 additions & 5 deletions examples/contrib/mnist/mnist_with_tensorboard_logger.py
@@ -26,12 +26,19 @@
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, ToTensor, Normalize

from ignite.contrib.handlers.tensorboard_logger import *
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss
from torchvision.transforms import Compose, Normalize, ToTensor

from ignite.contrib.handlers.tensorboard_logger import (
GradsHistHandler,
GradsScalarHandler,
TensorboardLogger,
WeightsHistHandler,
WeightsScalarHandler,
global_step_from_engine,
)
from ignite.engine import Events, create_supervised_evaluator, create_supervised_trainer
from ignite.handlers import ModelCheckpoint
from ignite.metrics import Accuracy, Loss
from ignite.utils import setup_logger


8 changes: 4 additions & 4 deletions examples/contrib/mnist/mnist_with_tqdm_logger.py
@@ -1,15 +1,15 @@
from argparse import ArgumentParser

import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
import torch
import torch.nn.functional as F
from torchvision.transforms import Compose, ToTensor, Normalize
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor

from ignite.contrib.handlers import ProgressBar
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.engine import Events, create_supervised_evaluator, create_supervised_trainer
from ignite.metrics import Accuracy, Loss, RunningAverage


19 changes: 12 additions & 7 deletions examples/contrib/mnist/mnist_with_visdom_logger.py
@@ -20,17 +20,22 @@
from argparse import ArgumentParser

import torch
from torch.utils.data import DataLoader
from torch import nn
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, ToTensor, Normalize

from ignite.contrib.handlers.visdom_logger import *
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss
from torchvision.transforms import Compose, Normalize, ToTensor

from ignite.contrib.handlers.visdom_logger import (
GradsScalarHandler,
VisdomLogger,
WeightsScalarHandler,
global_step_from_engine,
)
from ignite.engine import Events, create_supervised_evaluator, create_supervised_trainer
from ignite.handlers import ModelCheckpoint
from ignite.metrics import Accuracy, Loss
from ignite.utils import setup_logger


8 changes: 4 additions & 4 deletions examples/contrib/mnist/mnist_with_wandb_logger.py
@@ -23,12 +23,12 @@
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, ToTensor, Normalize
from torchvision.transforms import Compose, Normalize, ToTensor

from ignite.contrib.handlers.wandb_logger import *
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss
from ignite.contrib.handlers.wandb_logger import WandBLogger, global_step_from_engine
from ignite.engine import Events, create_supervised_evaluator, create_supervised_trainer
from ignite.handlers import ModelCheckpoint
from ignite.metrics import Accuracy, Loss
from ignite.utils import setup_logger


17 changes: 7 additions & 10 deletions examples/fast_neural_style/neural_style.py
@@ -1,25 +1,22 @@
# coding: utf-8
import argparse
import os
import random
import sys
from collections import OrderedDict

import numpy as np
import random
import torch
import utils
from handlers import Progbar
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms

from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint

import utils
from torchvision import datasets, transforms
from transformer_net import TransformerNet
from vgg import Vgg16
from handlers import Progbar

from collections import OrderedDict
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint


def check_paths(args):
2 changes: 1 addition & 1 deletion examples/gan/dcgan.py
@@ -366,9 +366,9 @@ def create_plots(engine):

mpl.use("agg")

+ import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
- import matplotlib.pyplot as plt

except ImportError:
warnings.warn("Loss plots will not be generated -- pandas or matplotlib not found")
9 changes: 4 additions & 5 deletions examples/mnist/mnist.py
@@ -1,19 +1,18 @@
from argparse import ArgumentParser

import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
import torch.nn.functional as F
from torchvision.transforms import Compose, ToTensor, Normalize
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, Normalize, ToTensor
from tqdm import tqdm

from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.engine import Events, create_supervised_evaluator, create_supervised_trainer
from ignite.metrics import Accuracy, Loss
from ignite.utils import setup_logger

from tqdm import tqdm


class Net(nn.Module):
def __init__(self):