Diffstat (limited to 'r_facelib/detection/yolov5face/utils')
 r_facelib/detection/yolov5face/utils/__init__.py     |   0
 r_facelib/detection/yolov5face/utils/autoanchor.py   |  12
 r_facelib/detection/yolov5face/utils/datasets.py     |  35
 r_facelib/detection/yolov5face/utils/extract_ckpt.py |   5
 r_facelib/detection/yolov5face/utils/general.py      | 271
 r_facelib/detection/yolov5face/utils/torch_utils.py  |  40
 6 files changed, 363 insertions, 0 deletions
diff --git a/r_facelib/detection/yolov5face/utils/__init__.py b/r_facelib/detection/yolov5face/utils/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/r_facelib/detection/yolov5face/utils/__init__.py
diff --git a/r_facelib/detection/yolov5face/utils/autoanchor.py b/r_facelib/detection/yolov5face/utils/autoanchor.py
new file mode 100644
index 0000000..cb0de89
--- /dev/null
+++ b/r_facelib/detection/yolov5face/utils/autoanchor.py
@@ -0,0 +1,12 @@
 +# Auto-anchor utils
 +
 +
 +def check_anchor_order(m):
 +    # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
 +    a = m.anchor_grid.prod(-1).view(-1)  # anchor area
 +    da = a[-1] - a[0]  # delta a
 +    ds = m.stride[-1] - m.stride[0]  # delta s
 +    if da.sign() != ds.sign():  # anchor order does not match stride order
 +        print("Reversing anchor order")
 +        m.anchors[:] = m.anchors.flip(0)
 +        m.anchor_grid[:] = m.anchor_grid.flip(0)
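
A minimal sketch of how check_anchor_order() defined above might be exercised, using a hypothetical stand-in for the Detect() module (the attribute shapes and anchor values below are assumptions, not part of this patch):

    import torch

    class FakeDetect:  # hypothetical stand-in for the YOLOv5 Detect() module
        def __init__(self):
            self.stride = torch.tensor([8.0, 16.0, 32.0])
            # anchors deliberately listed largest-first so they disagree with the stride order
            self.anchor_grid = torch.tensor(
                [[[116.0, 90.0], [156.0, 198.0], [373.0, 326.0]],
                 [[30.0, 61.0], [62.0, 45.0], [59.0, 119.0]],
                 [[10.0, 13.0], [16.0, 30.0], [33.0, 23.0]]])
            self.anchors = self.anchor_grid / self.stride.view(-1, 1, 1)

    m = FakeDetect()
    check_anchor_order(m)  # prints "Reversing anchor order" and flips anchors/anchor_grid in place
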
diff --git a/r_facelib/detection/yolov5face/utils/datasets.py b/r_facelib/detection/yolov5face/utils/datasets.py
new file mode 100644
index 0000000..a72609b
--- /dev/null
+++ b/r_facelib/detection/yolov5face/utils/datasets.py
@@ -0,0 +1,35 @@
 +import cv2
 +import numpy as np
 +
 +
 +def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scale_fill=False, scaleup=True):
 +    # Resize image to a 64-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
 +    shape = img.shape[:2]  # current shape [height, width]
 +    if isinstance(new_shape, int):
 +        new_shape = (new_shape, new_shape)
 +
 +    # Scale ratio (new / old)
 +    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
 +    if not scaleup:  # only scale down, do not scale up (for better test mAP)
 +        r = min(r, 1.0)
 +
 +    # Compute padding
 +    ratio = r, r  # width, height ratios
 +    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
 +    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
 +    if auto:  # minimum rectangle
 +        dw, dh = np.mod(dw, 64), np.mod(dh, 64)  # wh padding
 +    elif scale_fill:  # stretch
 +        dw, dh = 0.0, 0.0
 +        new_unpad = (new_shape[1], new_shape[0])
 +        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios
 +
 +    dw /= 2  # divide padding into 2 sides
 +    dh /= 2
 +
 +    if shape[::-1] != new_unpad:  # resize
 +        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
 +    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
 +    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
 +    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
 +    return img, ratio, (dw, dh)
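
A small usage sketch of letterbox(); the 720x1280 input size is only an illustrative assumption:

    import numpy as np

    img = np.zeros((720, 1280, 3), dtype=np.uint8)            # hypothetical BGR frame
    padded, ratio, (dw, dh) = letterbox(img, new_shape=640)
    print(padded.shape, ratio, (dw, dh))                      # (384, 640, 3) (0.5, 0.5) (0.0, 12.0)
    # With auto=True the short side is padded only up to the next multiple of 64;
    # (dw, dh) are the per-side paddings that scale_coords() later needs to undo.
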
diff --git a/r_facelib/detection/yolov5face/utils/extract_ckpt.py b/r_facelib/detection/yolov5face/utils/extract_ckpt.py
new file mode 100644
index 0000000..e6bde00
--- /dev/null
+++ b/r_facelib/detection/yolov5face/utils/extract_ckpt.py
@@ -0,0 +1,5 @@
 +import torch
 +import sys
 +sys.path.insert(0,'./facelib/detection/yolov5face')
 +model = torch.load('facelib/detection/yolov5face/yolov5n-face.pt', map_location='cpu')['model']
 +torch.save(model.state_dict(),'../../models/facedetection')
\ No newline at end of file
diff --git a/r_facelib/detection/yolov5face/utils/general.py b/r_facelib/detection/yolov5face/utils/general.py
new file mode 100644
index 0000000..618d2f3
--- /dev/null
+++ b/r_facelib/detection/yolov5face/utils/general.py
@@ -0,0 +1,271 @@
 +import math
 +import time
 +
 +import numpy as np
 +import torch
 +import torchvision
 +
 +
 +def check_img_size(img_size, s=32):
 +    # Verify img_size is a multiple of stride s
 +    new_size = make_divisible(img_size, int(s))  # ceil gs-multiple
 +    # if new_size != img_size:
 +    #     print(f"WARNING: --img-size {img_size:g} must be multiple of max stride {s:g}, updating to {new_size:g}")
 +    return new_size
 +
 +
 +def make_divisible(x, divisor):
 +    # Returns x evenly divisible by divisor
 +    return math.ceil(x / divisor) * divisor
 +
 +
 +def xyxy2xywh(x):
 +    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
 +    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
 +    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
 +    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
 +    y[:, 2] = x[:, 2] - x[:, 0]  # width
 +    y[:, 3] = x[:, 3] - x[:, 1]  # height
 +    return y
 +
 +
 +def xywh2xyxy(x):
 +    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
 +    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
 +    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
 +    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
 +    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
 +    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
 +    return y
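
A quick round-trip check of the two converters above (box values are illustrative):

    import torch

    boxes = torch.tensor([[10.0, 20.0, 110.0, 220.0]])   # x1, y1, x2, y2
    as_xywh = xyxy2xywh(boxes)                            # tensor([[ 60., 120., 100., 200.]])
    assert torch.allclose(xywh2xyxy(as_xywh), boxes)      # converting back recovers the corners
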
 +
 +
 +def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
 +    # Rescale coords (xyxy) from img1_shape to img0_shape
 +    if ratio_pad is None:  # calculate from img0_shape
 +        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain  = old / new
 +        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
 +    else:
 +        gain = ratio_pad[0][0]
 +        pad = ratio_pad[1]
 +
 +    coords[:, [0, 2]] -= pad[0]  # x padding
 +    coords[:, [1, 3]] -= pad[1]  # y padding
 +    coords[:, :4] /= gain
 +    clip_coords(coords, img0_shape)
 +    return coords
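
Continuing the letterbox sketch above (384x640 padded input and 720x1280 original frame, both assumed), a detection is mapped back like this:

    import torch

    det = torch.tensor([[100.0, 50.0, 300.0, 250.0]])   # xyxy on the 384x640 padded image
    scale_coords((384, 640), det, (720, 1280))           # modifies det in place
    print(det)                                           # tensor([[200.,  76., 600., 476.]])
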
 +
 +
 +def clip_coords(boxes, img_shape):
 +    # Clip xyxy bounding boxes to image shape (height, width)
 +    boxes[:, 0].clamp_(0, img_shape[1])  # x1
 +    boxes[:, 1].clamp_(0, img_shape[0])  # y1
 +    boxes[:, 2].clamp_(0, img_shape[1])  # x2
 +    boxes[:, 3].clamp_(0, img_shape[0])  # y2
 +
 +
 +def box_iou(box1, box2):
 +    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
 +    """
 +    Return intersection-over-union (Jaccard index) of boxes.
 +    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
 +    Arguments:
 +        box1 (Tensor[N, 4])
 +        box2 (Tensor[M, 4])
 +    Returns:
 +        iou (Tensor[N, M]): the NxM matrix containing the pairwise
 +            IoU values for every element in boxes1 and boxes2
 +    """
 +
 +    def box_area(box):
 +        return (box[2] - box[0]) * (box[3] - box[1])
 +
 +    area1 = box_area(box1.T)
 +    area2 = box_area(box2.T)
 +
 +    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
 +    return inter / (area1[:, None] + area2 - inter)
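
For example, with hand-checkable numbers: an identical box has IoU 1.0, and a box shifted by half its width has IoU 1/3:

    import torch

    a = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
    b = torch.tensor([[0.0, 0.0, 10.0, 10.0], [5.0, 0.0, 15.0, 10.0]])
    print(box_iou(a, b))   # tensor([[1.0000, 0.3333]])
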
 +
 +
 +def non_max_suppression_face(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()):
 +    """Performs Non-Maximum Suppression (NMS) on inference results
 +    Returns:
 +         detections with shape: nx16 (x1, y1, x2, y2, conf, 10 landmark coords, cls)
 +    """
 +
 +    nc = prediction.shape[2] - 15  # number of classes
 +    xc = prediction[..., 4] > conf_thres  # candidates
 +
 +    # Settings
 +    # (pixels) maximum box width and height
 +    max_wh = 4096
 +    time_limit = 10.0  # seconds to quit after
 +    redundant = True  # require redundant detections
 +    multi_label = nc > 1  # multiple labels per box (adds 0.5ms/img)
 +    merge = False  # use merge-NMS
 +
 +    t = time.time()
 +    output = [torch.zeros((0, 16), device=prediction.device)] * prediction.shape[0]
 +    for xi, x in enumerate(prediction):  # image index, image inference
 +        # Apply constraints
 +        x = x[xc[xi]]  # confidence
 +
 +        # Cat apriori labels if autolabelling
 +        if labels and len(labels[xi]):
 +            label = labels[xi]
 +            v = torch.zeros((len(label), nc + 15), device=x.device)
 +            v[:, :4] = label[:, 1:5]  # box
 +            v[:, 4] = 1.0  # conf
 +            v[range(len(label)), label[:, 0].long() + 15] = 1.0  # cls
 +            x = torch.cat((x, v), 0)
 +
 +        # If none remain process next image
 +        if not x.shape[0]:
 +            continue
 +
 +        # Compute conf
 +        x[:, 15:] *= x[:, 4:5]  # conf = obj_conf * cls_conf
 +
 +        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
 +        box = xywh2xyxy(x[:, :4])
 +
 +        # Detections matrix nx16 (xyxy, conf, landmarks, cls)
 +        if multi_label:
 +            i, j = (x[:, 15:] > conf_thres).nonzero(as_tuple=False).T
 +            x = torch.cat((box[i], x[i, j + 15, None], x[i, 5:15], j[:, None].float()), 1)
 +        else:  # best class only
 +            conf, j = x[:, 15:].max(1, keepdim=True)
 +            x = torch.cat((box, conf, x[:, 5:15], j.float()), 1)[conf.view(-1) > conf_thres]
 +
 +        # Filter by class
 +        if classes is not None:
 +            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
 +
 +        # If none remain process next image
 +        n = x.shape[0]  # number of boxes
 +        if not n:
 +            continue
 +
 +        # Batched NMS
 +        c = x[:, 15:16] * (0 if agnostic else max_wh)  # classes
 +        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
 +        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
 +
 +        if merge and (1 < n < 3e3):  # Merge NMS (boxes merged using weighted mean)
 +            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
 +            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
 +            weights = iou * scores[None]  # box weights
 +            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
 +            if redundant:
 +                i = i[iou.sum(1) > 1]  # require redundancy
 +
 +        output[xi] = x[i]
 +        if (time.time() - t) > time_limit:
 +            break  # time limit exceeded
 +
 +    return output
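
A hedged usage sketch: the single-class, 16-column layout (cx, cy, w, h, obj, 10 landmark coords, cls) matches what the function indexes, but the random tensor is only a placeholder for real model output:

    import torch

    pred = torch.rand(1, 100, 16)              # 1 image, 100 candidates (placeholder values)
    pred[..., :4] *= 640                       # boxes in pixels of the network input
    dets = non_max_suppression_face(pred, conf_thres=0.25, iou_thres=0.45)
    for det in dets:                           # one (n, 16) tensor per image:
        print(det.shape)                       # x1, y1, x2, y2, conf, 10 landmark coords, cls
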
 +
 +
 +def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()):
 +    """Performs Non-Maximum Suppression (NMS) on inference results
 +
 +    Returns:
 +         detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
 +    """
 +
 +    nc = prediction.shape[2] - 5  # number of classes
 +    xc = prediction[..., 4] > conf_thres  # candidates
 +
 +    # Settings
 +    # (pixels) maximum box width and height
 +    max_wh = 4096
 +    time_limit = 10.0  # seconds to quit after
 +    redundant = True  # require redundant detections
 +    multi_label = nc > 1  # multiple labels per box (adds 0.5ms/img)
 +    merge = False  # use merge-NMS
 +
 +    t = time.time()
 +    output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
 +    for xi, x in enumerate(prediction):  # image index, image inference
 +        x = x[xc[xi]]  # confidence
 +
 +        # Cat apriori labels if autolabelling
 +        if labels and len(labels[xi]):
 +            label_id = labels[xi]
 +            v = torch.zeros((len(label_id), nc + 5), device=x.device)
 +            v[:, :4] = label_id[:, 1:5]  # box
 +            v[:, 4] = 1.0  # conf
 +            v[range(len(label_id)), label_id[:, 0].long() + 5] = 1.0  # cls
 +            x = torch.cat((x, v), 0)
 +
 +        # If none remain process next image
 +        if not x.shape[0]:
 +            continue
 +
 +        # Compute conf
 +        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf
 +
 +        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
 +        box = xywh2xyxy(x[:, :4])
 +
 +        # Detections matrix nx6 (xyxy, conf, cls)
 +        if multi_label:
 +            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
 +            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
 +        else:  # best class only
 +            conf, j = x[:, 5:].max(1, keepdim=True)
 +            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
 +
 +        # Filter by class
 +        if classes is not None:
 +            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
 +
 +        # Check shape
 +        n = x.shape[0]  # number of boxes
 +        if not n:  # no boxes
 +            continue
 +
 +        x = x[x[:, 4].argsort(descending=True)]  # sort by confidence
 +
 +        # Batched NMS
 +        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
 +        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
 +        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
 +        if merge and (1 < n < 3e3):  # Merge NMS (boxes merged using weighted mean)
 +            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
 +            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
 +            weights = iou * scores[None]  # box weights
 +            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
 +            if redundant:
 +                i = i[iou.sum(1) > 1]  # require redundancy
 +
 +        output[xi] = x[i]
 +        if (time.time() - t) > time_limit:
 +            print(f"WARNING: NMS time limit {time_limit}s exceeded")
 +            break  # time limit exceeded
 +
 +    return output
 +
 +
 +def scale_coords_landmarks(img1_shape, coords, img0_shape, ratio_pad=None):
 +    # Rescale coords (xyxy) from img1_shape to img0_shape
 +    if ratio_pad is None:  # calculate from img0_shape
 +        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain  = old / new
 +        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
 +    else:
 +        gain = ratio_pad[0][0]
 +        pad = ratio_pad[1]
 +
 +    coords[:, [0, 2, 4, 6, 8]] -= pad[0]  # x padding
 +    coords[:, [1, 3, 5, 7, 9]] -= pad[1]  # y padding
 +    coords[:, :10] /= gain
 +    coords[:, 0].clamp_(0, img0_shape[1])  # x1
 +    coords[:, 1].clamp_(0, img0_shape[0])  # y1
 +    coords[:, 2].clamp_(0, img0_shape[1])  # x2
 +    coords[:, 3].clamp_(0, img0_shape[0])  # y2
 +    coords[:, 4].clamp_(0, img0_shape[1])  # x3
 +    coords[:, 5].clamp_(0, img0_shape[0])  # y3
 +    coords[:, 6].clamp_(0, img0_shape[1])  # x4
 +    coords[:, 7].clamp_(0, img0_shape[0])  # y4
 +    coords[:, 8].clamp_(0, img0_shape[1])  # x5
 +    coords[:, 9].clamp_(0, img0_shape[0])  # y5
 +    return coords
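
Mirroring the scale_coords() sketch above, five (x, y) landmark pairs can be mapped back from the 384x640 letterboxed input to the assumed 720x1280 frame:

    import torch

    lm = torch.tensor([[120.0, 60.0, 180.0, 60.0, 150.0, 90.0, 130.0, 120.0, 170.0, 120.0]])
    scale_coords_landmarks((384, 640), lm, (720, 1280))   # in place, like scale_coords()
    print(lm)   # each x becomes (x - 0) / 0.5, each y becomes (y - 12) / 0.5
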
diff --git a/r_facelib/detection/yolov5face/utils/torch_utils.py b/r_facelib/detection/yolov5face/utils/torch_utils.py
new file mode 100644
index 0000000..f702962
--- /dev/null
+++ b/r_facelib/detection/yolov5face/utils/torch_utils.py
@@ -0,0 +1,40 @@
 +import torch
 +from torch import nn
 +
 +
 +def fuse_conv_and_bn(conv, bn):
 +    # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
 +    fusedconv = (
 +        nn.Conv2d(
 +            conv.in_channels,
 +            conv.out_channels,
 +            kernel_size=conv.kernel_size,
 +            stride=conv.stride,
 +            padding=conv.padding,
 +            groups=conv.groups,
 +            bias=True,
 +        )
 +        .requires_grad_(False)
 +        .to(conv.weight.device)
 +    )
 +
 +    # prepare filters
 +    w_conv = conv.weight.clone().view(conv.out_channels, -1)
 +    w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
 +    fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))
 +
 +    # prepare spatial bias
 +    b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
 +    b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
 +    fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
 +
 +    return fusedconv
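
A sanity-check sketch (layer sizes and the random running statistics are assumptions): after fusing, the single conv should reproduce conv+bn in eval mode up to floating-point noise:

    import torch
    from torch import nn

    conv = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=False)
    bn = nn.BatchNorm2d(8).eval()        # inference mode: uses running statistics
    bn.running_mean.uniform_(-1, 1)      # non-trivial stats so the check is meaningful
    bn.running_var.uniform_(0.5, 2.0)
    x = torch.randn(1, 3, 32, 32)
    with torch.no_grad():
        fused = fuse_conv_and_bn(conv, bn)
        assert torch.allclose(bn(conv(x)), fused(x), atol=1e-5)
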
 +
 +
 +def copy_attr(a, b, include=(), exclude=()):
 +    # Copy attributes from b to a, options to only include [...] and to exclude [...]
 +    for k, v in b.__dict__.items():
 +        if (include and k not in include) or k.startswith("_") or k in exclude:
 +            continue
 +
 +        setattr(a, k, v)
