Code reference: https://blog.csdn.net/disiwei1012/article/details/79928679
import os
import sys
import random
import math
import numpy as np
import skimage.io
import matplotlib
import matplotlib.pyplot as plt
# import coco
from mrcnn import utils
from mrcnn import model as modellib
from mrcnn import visualize
from mrcnn.config import Config
# %matplotlib inline

# Root directory of the project
ROOT_DIR = os.getcwd()

# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")

# Local path to the trained weights file
COCO_MODEL_PATH = "mask_rcnn_shapes_0001.h5"

# Directory of images to run detection on
IMAGE_DIR = os.path.join(ROOT_DIR, "images")


class ShapesConfig(Config):
    """Configuration for training on the toy shapes dataset.
    Derives from the base Config class and overrides values specific
    to the toy shapes dataset.
    """
    # Give the configuration a recognizable name
    NAME = "shapes"

    # Run on 1 GPU with 1 image per GPU, so the effective batch size
    # is 1 (GPUs * images/GPU).
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1

    # Number of classes (including background)
    NUM_CLASSES = 1 + 1  # background + 1 foreground class

    # Limits of the small side and the large side; together they
    # determine the image shape.
    IMAGE_MIN_DIM = 1024
    IMAGE_MAX_DIM = 1280

    # Anchor side in pixels, chosen to match the object sizes in this dataset
    RPN_ANCHOR_SCALES = (8 * 6, 16 * 6, 32 * 6, 64 * 6, 128 * 6)

    # Reduce training ROIs per image because the images are small and have
    # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
    TRAIN_ROIS_PER_IMAGE = 32

    # Use a small epoch since the data is simple
    STEPS_PER_EPOCH = 100

    # Use small validation steps since the epoch is small
    VALIDATION_STEPS = 5


class InferenceConfig(ShapesConfig):
    # Set batch size to 1 since we'll be running inference on
    # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1


config = InferenceConfig()
config.display()

# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)

# Load the weights produced by training on this dataset.
model.load_weights(COCO_MODEL_PATH, by_name=True)

# Class names.
# The index of a class in the list is its ID, e.g. class_names.index('mono') == 1.
class_names = ['BG', 'mono']
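# Sanity check (a suggested addition, not part of the original script):
# class_names must have exactly NUM_CLASSES entries, otherwise the class IDs
# returned by detect() will not map to the intended labels.
assert len(class_names) == config.NUM_CLASSES, \
    "class_names has %d entries but NUM_CLASSES is %d" % (len(class_names), config.NUM_CLASSES)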
# Load a random image from the images folder
file_names = next(os.walk(IMAGE_DIR))[2]
image = skimage.io.imread(os.path.join(IMAGE_DIR, random.choice(file_names)))

# Run detection
results = model.detect([image], verbose=1)

# Visualize results
r = results[0]
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
                            class_names, r['scores'])
print('OK')
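The script above detects on one randomly chosen image and pops up a figure. Below is a minimal sketch (not from the original post) of how the same model could be run over every file in IMAGE_DIR with the results written to disk instead of shown; it reuses the model, class_names, and file_names defined above, and the OUTPUT_DIR folder name is an assumption.

# Batch inference sketch: save one annotated image per input file.
OUTPUT_DIR = os.path.join(ROOT_DIR, "results")  # hypothetical output folder
os.makedirs(OUTPUT_DIR, exist_ok=True)

for name in file_names:
    img = skimage.io.imread(os.path.join(IMAGE_DIR, name))
    r = model.detect([img], verbose=0)[0]
    # Passing an explicit Axes keeps display_instances from calling plt.show()
    _, ax = plt.subplots(1, figsize=(16, 16))
    visualize.display_instances(img, r['rois'], r['masks'], r['class_ids'],
                                class_names, r['scores'], ax=ax)
    plt.savefig(os.path.join(OUTPUT_DIR, os.path.splitext(name)[0] + '.png'))
    plt.close('all')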