
[Merge] Merge and update copyright

geoyee 3 years ago
parent
commit
22ed1f1550

+ 23 - 4
paddlers/__init__.py

@@ -1,5 +1,24 @@
-from . import tasks, datasets, transforms, utils, tools, models
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
-# TODO, add these info in installation
-env_info = {'place': 'gpu', 'num': 1}
-__version__ = 0.1
+__version__ = '0.0.1'
+
+from paddlers.utils.env import get_environ_info, init_parallel_env
+init_parallel_env()
+
+env_info = get_environ_info()
+
+log_level = 2
+
+from . import tasks, datasets, transforms, utils, tools, models
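
For context on what the rewritten `paddlers/__init__.py` exposes, here is a minimal usage sketch (not part of the commit); the example values in the comments, such as `{'place': 'gpu', 'num': 1}`, are assumptions based on the old hard-coded `env_info` dict.

```
# Minimal sketch (assumption, not part of this commit): inspect what the new
# paddlers/__init__.py exposes after import. The values shown in comments are
# illustrative, not guaranteed.
import paddlers

print(paddlers.__version__)   # '0.0.1'
print(paddlers.env_info)      # e.g. {'place': 'gpu', 'num': 1} on a single-GPU machine
print(paddlers.log_level)     # 2
```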

+ 1 - 1
paddlers/models/ppseg/__init__.py

@@ -1,4 +1,4 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 16 - 16
paddlers/models/ppseg/utils/env/__init__.py

@@ -1,16 +1,16 @@
-# Copyright (c) 2020  PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from . import seg_env
-from .sys_env import get_sys_env
+# Copyright (c) 2022  PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import seg_env
+from .sys_env import get_sys_env

+ 56 - 56
paddlers/models/ppseg/utils/env/seg_env.py

@@ -1,56 +1,56 @@
-# Copyright (c) 2020  PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-This module is used to store environmental parameters in PaddleSeg.
-
-SEG_HOME : Root directory for storing PaddleSeg related data. Default to ~/.paddleseg.
-           Users can change the default value through the SEG_HOME environment variable.
-DATA_HOME : The directory to store the automatically downloaded dataset, e.g ADE20K.
-PRETRAINED_MODEL_HOME : The directory to store the automatically downloaded pretrained model.
-"""
-
-import os
-
-from paddleseg.utils import logger
-
-
-def _get_user_home():
-    return os.path.expanduser('~')
-
-
-def _get_seg_home():
-    if 'SEG_HOME' in os.environ:
-        home_path = os.environ['SEG_HOME']
-        if os.path.exists(home_path):
-            if os.path.isdir(home_path):
-                return home_path
-            else:
-                logger.warning('SEG_HOME {} is a file!'.format(home_path))
-        else:
-            return home_path
-    return os.path.join(_get_user_home(), '.paddleseg')
-
-
-def _get_sub_home(directory):
-    home = os.path.join(_get_seg_home(), directory)
-    if not os.path.exists(home):
-        os.makedirs(home, exist_ok=True)
-    return home
-
-
-USER_HOME = _get_user_home()
-SEG_HOME = _get_seg_home()
-DATA_HOME = _get_sub_home('dataset')
-TMP_HOME = _get_sub_home('tmp')
-PRETRAINED_MODEL_HOME = _get_sub_home('pretrained_model')
+# Copyright (c) 2022  PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This module is used to store environmental parameters in PaddleSeg.
+
+SEG_HOME : Root directory for storing PaddleSeg related data. Default to ~/.paddleseg.
+           Users can change the default value through the SEG_HOME environment variable.
+DATA_HOME : The directory to store the automatically downloaded dataset, e.g ADE20K.
+PRETRAINED_MODEL_HOME : The directory to store the automatically downloaded pretrained model.
+"""
+
+import os
+
+from paddlers.models.ppseg.utils import logger
+
+
+def _get_user_home():
+    return os.path.expanduser('~')
+
+
+def _get_seg_home():
+    if 'SEG_HOME' in os.environ:
+        home_path = os.environ['SEG_HOME']
+        if os.path.exists(home_path):
+            if os.path.isdir(home_path):
+                return home_path
+            else:
+                logger.warning('SEG_HOME {} is a file!'.format(home_path))
+        else:
+            return home_path
+    return os.path.join(_get_user_home(), '.paddleseg')
+
+
+def _get_sub_home(directory):
+    home = os.path.join(_get_seg_home(), directory)
+    if not os.path.exists(home):
+        os.makedirs(home, exist_ok=True)
+    return home
+
+
+USER_HOME = _get_user_home()
+SEG_HOME = _get_seg_home()
+DATA_HOME = _get_sub_home('dataset')
+TMP_HOME = _get_sub_home('tmp')
+PRETRAINED_MODEL_HOME = _get_sub_home('pretrained_model')
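
A minimal sketch of how the `SEG_HOME` override in `_get_seg_home()` is picked up; the cache path below is purely illustrative, and the variable must be set before the module is imported because the constants are computed at import time.

```
# Minimal sketch (illustrative path): override the PaddleSeg data root via
# SEG_HOME before importing seg_env, since the constants are set at import time.
import os
os.environ['SEG_HOME'] = '/data/paddleseg_cache'

from paddlers.models.ppseg.utils.env import seg_env

print(seg_env.SEG_HOME)                # /data/paddleseg_cache
print(seg_env.DATA_HOME)               # /data/paddleseg_cache/dataset
print(seg_env.PRETRAINED_MODEL_HOME)   # /data/paddleseg_cache/pretrained_model
```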

+ 122 - 124
paddlers/models/ppseg/utils/env/sys_env.py

@@ -1,124 +1,122 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import glob
-import os
-import platform
-import subprocess
-import sys
-
-import cv2
-import paddle
-import paddleseg
-
-IS_WINDOWS = sys.platform == 'win32'
-
-
-def _find_cuda_home():
-    '''Finds the CUDA install path. It refers to the implementation of
-    pytorch <https://github.com/pytorch/pytorch/blob/master/torch/utils/cpp_extension.py>.
-    '''
-    # Guess #1
-    cuda_home = os.environ.get('CUDA_HOME') or os.environ.get('CUDA_PATH')
-    if cuda_home is None:
-        # Guess #2
-        try:
-            which = 'where' if IS_WINDOWS else 'which'
-            nvcc = subprocess.check_output([which,
-                                            'nvcc']).decode().rstrip('\r\n')
-            cuda_home = os.path.dirname(os.path.dirname(nvcc))
-        except Exception:
-            # Guess #3
-            if IS_WINDOWS:
-                cuda_homes = glob.glob(
-                    'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v*.*')
-                if len(cuda_homes) == 0:
-                    cuda_home = ''
-                else:
-                    cuda_home = cuda_homes[0]
-            else:
-                cuda_home = '/usr/local/cuda'
-            if not os.path.exists(cuda_home):
-                cuda_home = None
-    return cuda_home
-
-
-def _get_nvcc_info(cuda_home):
-    if cuda_home is not None and os.path.isdir(cuda_home):
-        try:
-            nvcc = os.path.join(cuda_home, 'bin/nvcc')
-            nvcc = subprocess.check_output(
-                "{} -V".format(nvcc), shell=True).decode()
-            nvcc = nvcc.strip().split('\n')[-1]
-        except subprocess.SubprocessError:
-            nvcc = "Not Available"
-    else:
-        nvcc = "Not Available"
-    return nvcc
-
-
-def _get_gpu_info():
-    try:
-        gpu_info = subprocess.check_output(['nvidia-smi',
-                                            '-L']).decode().strip()
-        gpu_info = gpu_info.split('\n')
-        for i in range(len(gpu_info)):
-            gpu_info[i] = ' '.join(gpu_info[i].split(' ')[:4])
-    except:
-        gpu_info = ' Can not get GPU information. Please make sure CUDA have been installed successfully.'
-    return gpu_info
-
-
-def get_sys_env():
-    """collect environment information"""
-    env_info = {}
-    env_info['platform'] = platform.platform()
-
-    env_info['Python'] = sys.version.replace('\n', '')
-
-    # TODO is_compiled_with_cuda() has not been moved
-    compiled_with_cuda = paddle.is_compiled_with_cuda()
-    env_info['Paddle compiled with cuda'] = compiled_with_cuda
-
-    if compiled_with_cuda:
-        cuda_home = _find_cuda_home()
-        env_info['NVCC'] = _get_nvcc_info(cuda_home)
-        # refer to https://github.com/PaddlePaddle/Paddle/blob/release/2.0-rc/paddle/fluid/platform/device_context.cc#L327
-        v = paddle.get_cudnn_version()
-        v = str(v // 1000) + '.' + str(v % 1000 // 100)
-        env_info['cudnn'] = v
-        if 'gpu' in paddle.get_device():
-            gpu_nums = paddle.distributed.ParallelEnv().nranks
-        else:
-            gpu_nums = 0
-        env_info['GPUs used'] = gpu_nums
-
-        env_info['CUDA_VISIBLE_DEVICES'] = os.environ.get(
-            'CUDA_VISIBLE_DEVICES')
-        if gpu_nums == 0:
-            os.environ['CUDA_VISIBLE_DEVICES'] = ''
-        env_info['GPU'] = _get_gpu_info()
-
-    try:
-        gcc = subprocess.check_output(['gcc', '--version']).decode()
-        gcc = gcc.strip().split('\n')[0]
-        env_info['GCC'] = gcc
-    except:
-        pass
-
-    env_info['PaddleSeg'] = paddleseg.__version__
-    env_info['PaddlePaddle'] = paddle.__version__
-    env_info['OpenCV'] = cv2.__version__
-
-    return env_info
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import glob
+import os
+import platform
+import subprocess
+import sys
+
+import cv2
+import paddle
+
+IS_WINDOWS = sys.platform == 'win32'
+
+
+def _find_cuda_home():
+    '''Finds the CUDA install path. It refers to the implementation of
+    pytorch <https://github.com/pytorch/pytorch/blob/master/torch/utils/cpp_extension.py>.
+    '''
+    # Guess #1
+    cuda_home = os.environ.get('CUDA_HOME') or os.environ.get('CUDA_PATH')
+    if cuda_home is None:
+        # Guess #2
+        try:
+            which = 'where' if IS_WINDOWS else 'which'
+            nvcc = subprocess.check_output([which,
+                                            'nvcc']).decode().rstrip('\r\n')
+            cuda_home = os.path.dirname(os.path.dirname(nvcc))
+        except Exception:
+            # Guess #3
+            if IS_WINDOWS:
+                cuda_homes = glob.glob(
+                    'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v*.*')
+                if len(cuda_homes) == 0:
+                    cuda_home = ''
+                else:
+                    cuda_home = cuda_homes[0]
+            else:
+                cuda_home = '/usr/local/cuda'
+            if not os.path.exists(cuda_home):
+                cuda_home = None
+    return cuda_home
+
+
+def _get_nvcc_info(cuda_home):
+    if cuda_home is not None and os.path.isdir(cuda_home):
+        try:
+            nvcc = os.path.join(cuda_home, 'bin/nvcc')
+            nvcc = subprocess.check_output(
+                "{} -V".format(nvcc), shell=True).decode()
+            nvcc = nvcc.strip().split('\n')[-1]
+        except subprocess.SubprocessError:
+            nvcc = "Not Available"
+    else:
+        nvcc = "Not Available"
+    return nvcc
+
+
+def _get_gpu_info():
+    try:
+        gpu_info = subprocess.check_output(['nvidia-smi',
+                                            '-L']).decode().strip()
+        gpu_info = gpu_info.split('\n')
+        for i in range(len(gpu_info)):
+            gpu_info[i] = ' '.join(gpu_info[i].split(' ')[:4])
+    except:
+        gpu_info = ' Can not get GPU information. Please make sure CUDA have been installed successfully.'
+    return gpu_info
+
+
+def get_sys_env():
+    """collect environment information"""
+    env_info = {}
+    env_info['platform'] = platform.platform()
+
+    env_info['Python'] = sys.version.replace('\n', '')
+
+    # TODO is_compiled_with_cuda() has not been moved
+    compiled_with_cuda = paddle.is_compiled_with_cuda()
+    env_info['Paddle compiled with cuda'] = compiled_with_cuda
+
+    if compiled_with_cuda:
+        cuda_home = _find_cuda_home()
+        env_info['NVCC'] = _get_nvcc_info(cuda_home)
+        # refer to https://github.com/PaddlePaddle/Paddle/blob/release/2.0-rc/paddle/fluid/platform/device_context.cc#L327
+        v = paddle.get_cudnn_version()
+        v = str(v // 1000) + '.' + str(v % 1000 // 100)
+        env_info['cudnn'] = v
+        if 'gpu' in paddle.get_device():
+            gpu_nums = paddle.distributed.ParallelEnv().nranks
+        else:
+            gpu_nums = 0
+        env_info['GPUs used'] = gpu_nums
+
+        env_info['CUDA_VISIBLE_DEVICES'] = os.environ.get(
+            'CUDA_VISIBLE_DEVICES')
+        if gpu_nums == 0:
+            os.environ['CUDA_VISIBLE_DEVICES'] = ''
+        env_info['GPU'] = _get_gpu_info()
+
+    try:
+        gcc = subprocess.check_output(['gcc', '--version']).decode()
+        gcc = gcc.strip().split('\n')[0]
+        env_info['GCC'] = gcc
+    except:
+        pass
+
+    env_info['PaddlePaddle'] = paddle.__version__
+    env_info['OpenCV'] = cv2.__version__
+
+    return env_info
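
For reference, a short usage sketch of `get_sys_env()` (not part of the commit); which keys appear depends on whether Paddle was compiled with CUDA.

```
# Minimal sketch: print the environment report collected by get_sys_env().
from paddlers.models.ppseg.utils.env import get_sys_env

for key, value in get_sys_env().items():
    print('{}: {}'.format(key, value))
# Always includes 'platform', 'Python', 'PaddlePaddle' and 'OpenCV';
# NVCC, cudnn and GPU entries appear only on CUDA builds.
```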

+ 35 - 3
paddlers/transforms/operators.py

@@ -16,6 +16,8 @@ import numpy as np
 import cv2
 import copy
 import random
+import imghdr
+import os
 from PIL import Image
 import paddlers
 
@@ -146,9 +148,39 @@ class Decode(Transform):
         super(Decode, self).__init__()
         self.to_rgb = to_rgb
 
-    def read_img(self, img_path):
-        return cv2.imread(img_path, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_ANYCOLOR |
-                          cv2.IMREAD_COLOR)
+    def read_img(self, img_path, input_channel=3):
+        img_format = imghdr.what(img_path)
+        name, ext = os.path.splitext(img_path)
+        if img_format == 'tiff' or ext == '.img':
+            try:
+                import gdal
+            except ImportError:
+                try:
+                    from osgeo import gdal
+                except ImportError:
+                    raise Exception(
+                        "Failed to import gdal! You can try using conda to install GDAL."
+                    )
+
+            dataset = gdal.Open(img_path)
+            if dataset is None:
+                raise Exception('Can not open {}.'.format(img_path))
+            im_data = dataset.ReadAsArray()
+            if im_data.ndim == 3:
+                # GDAL returns CHW arrays; convert to HWC for the transforms.
+                im_data = im_data.transpose((1, 2, 0))
+            return im_data
+        elif img_format in ['jpeg', 'bmp', 'png', 'jpg']:
+            if input_channel == 3:
+                return cv2.imread(img_path, cv2.IMREAD_ANYDEPTH |
+                                  cv2.IMREAD_ANYCOLOR | cv2.IMREAD_COLOR)
+            else:
+                return cv2.imread(img_path, cv2.IMREAD_ANYDEPTH |
+                                  cv2.IMREAD_ANYCOLOR)
+        elif ext == '.npy':
+            return np.load(img_path)
+        else:
+            raise Exception('Image format {} is not supported!'.format(ext))
 
     def apply_im(self, im_path):
         if isinstance(im_path, str):

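To illustrate how the extended `read_img` dispatches on file type, a hedged sketch follows; the file names are placeholders and GDAL must be installed for the TIFF branch.

```
# Minimal sketch (placeholder file names): the extended Decode.read_img
# dispatches on the detected format.
from paddlers.transforms.operators import Decode

decoder = Decode(to_rgb=True)
sar_scene = decoder.read_img('scene.tif')   # GeoTIFF -> GDAL, returned as an HWC ndarray
patch = decoder.read_img('patch.npy')       # .npy -> np.load
photo = decoder.read_img('sample.png')      # png/jpg/bmp -> cv2.imread
```
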
+ 1 - 1
paddlers/utils/utils.py

@@ -74,7 +74,7 @@ def path_normalization(path):
 
 
 def is_pic(img_name):
-    valid_suffix = ['JPEG', 'jpeg', 'JPG', 'jpg', 'BMP', 'bmp', 'PNG', 'png']
+    valid_suffix = ['JPEG', 'jpeg', 'JPG', 'jpg', 'BMP', 'bmp', 'PNG', 'png', 'tiff']
     suffix = img_name.split('.')[-1]
     if suffix not in valid_suffix:
         return False

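A small sketch of what the extended suffix list accepts; note that only the lowercase `tiff` spelling is matched, so `.tif` and `.TIFF` still fall through to `False`.

```
# Minimal sketch: behaviour of is_pic after adding 'tiff' to valid_suffix.
from paddlers.utils.utils import is_pic

print(is_pic('scene.tiff'))   # True  ('tiff' is now accepted)
print(is_pic('scene.png'))    # True
print(is_pic('scene.tif'))    # False ('tif' and 'TIFF' are still not listed)
```
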
+ 1 - 1
requirements.txt

@@ -14,4 +14,4 @@ motmetrics
 matplotlib
 chardet
 openpyxl
-GDAL >= 3.1.3
+GDAL >= 3.1.3

+ 43 - 0
setup.py

@@ -0,0 +1,43 @@
+# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import setuptools
+
+long_description = "Awesome Remote Sensing Toolkit based on PaddlePaddle"
+
+setuptools.setup(
+    name="paddlers",
+    version='0.0.1',
+    author="paddlers",
+    author_email="paddlers@baidu.com",
+    description=long_description,
+    long_description=long_description,
+    long_description_content_type="text/plain",
+    url="https://github.com/PaddleCV-SIG/PaddleRS",
+    packages=setuptools.find_packages(),
+    setup_requires=['cython', 'numpy'],
+    install_requires=[
+        "pycocotools", 'pyyaml', 'colorama', 'tqdm', 'paddleslim==2.2.1',
+        'visualdl>=2.2.2', 'shapely>=1.7.0', 'opencv-python', 'scipy', 'lap',
+        'motmetrics', 'scikit-learn==0.23.2', 'chardet', 'flask_cors',
+        'openpyxl', 'gdal'
+    ],
+    classifiers=[
+        "Programming Language :: Python :: 3",
+        "License :: OSI Approved :: Apache Software License",
+        "Operating System :: OS Independent",
+    ],
+    license='Apache 2.0',
+    )
+

+ 64 - 0
tutorials/train/detection/faster_rcnn_sar_ship.py

@@ -0,0 +1,64 @@
+import os
+import paddlers as pdrs
+from paddlers import transforms as T
+
+# download dataset
+data_dir = 'sar_ship_1'
+if not os.path.exists(data_dir):
+    dataset_url = 'https://paddleseg.bj.bcebos.com/dataset/sar_ship_1.tar.gz'
+    pdrs.utils.download_and_decompress(dataset_url, path='./')
+
+# define transforms
+train_transforms = T.Compose([
+    T.RandomDistort(),
+    T.RandomExpand(im_padding_value=[123.675, 116.28, 103.53]),
+    T.RandomCrop(),
+    T.RandomHorizontalFlip(),
+    T.BatchRandomResize(
+        target_sizes=[320, 352, 384, 416, 448, 480, 512, 544, 576, 608],
+        interp='RANDOM'),
+    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+])
+
+eval_transforms = T.Compose([
+    T.Resize(
+        target_size=608, interp='CUBIC'), T.Normalize(
+            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+])
+
+# define dataset
+train_file_list = os.path.join(data_dir, 'train.txt')
+val_file_list = os.path.join(data_dir, 'valid.txt')
+label_file_list = os.path.join(data_dir, 'labels.txt')
+train_dataset = pdrs.datasets.VOCDetection(
+    data_dir=data_dir,
+    file_list=train_file_list,
+    label_list=label_file_list,
+    transforms=train_transforms,
+    shuffle=True)
+
+eval_dataset = pdrs.datasets.VOCDetection(
+    data_dir=data_dir,
+    file_list=val_file_list,
+    label_list=label_file_list,
+    transforms=eval_transforms,
+    shuffle=False)
+
+# define models
+num_classes = len(train_dataset.labels)
+model = pdrs.tasks.det.FasterRCNN(num_classes=num_classes)
+
+# train
+model.train(
+    num_epochs=60,
+    train_dataset=train_dataset,
+    train_batch_size=2,
+    eval_dataset=eval_dataset,
+    pretrain_weights='COCO',
+    learning_rate=0.005 / 12,
+    warmup_steps=10,
+    warmup_start_lr=0.0,
+    save_interval_epochs=5,
+    lr_decay_epochs=[20, 40],
+    save_dir='output/faster_rcnn_sar_ship',
+    use_vdl=True)
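
As a hedged follow-up (not part of this commit), inference with the trained detector might look like the sketch below; `load_model` and the `best_model` directory name follow the PaddleX-style API that paddlers inherits, so treat both as assumptions.

```
# Hedged follow-up sketch: run inference with the trained detector.
# load_model and the 'best_model' directory are assumptions based on the
# PaddleX-style API; the image path is a placeholder.
import paddlers as pdrs

model = pdrs.tasks.load_model('output/faster_rcnn_sar_ship/best_model')
result = model.predict('sar_ship_1/images/example.jpg')
print(result)  # assumed: a list of dicts with 'category', 'bbox' and 'score'
```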

+ 28 - 0
tutorials/train/detection/readme.md

@@ -0,0 +1,28 @@
+The detection training demo:
+* dataset: AIR-SARShip-1.0 
+* target: ship
+* model: faster_rcnn
+
+
+Run the demo:
+
+1. Install PaddleRS
+```
+git clone https://github.com/PaddleCV-SIG/PaddleRS.git
+cd PaddleRS
+pip install -r requirements.txt
+python setup.py install
+```
+
+2. Run the demo
+```
+cd tutorials/train/detection/
+
+# run training on single GPU
+export CUDA_VISIBLE_DEVICES=0
+python faster_rcnn_sar_ship.py
+
+# run training on multiple GPUs
+export CUDA_VISIBLE_DEVICES=0,1
+python -m paddle.distributed.launch faster_rcnn_sar_ship.py
+```