Computer vision practice (XIII) parking space identification (with complete code)

My WeChat official account: AI research subscription number
WeChat public address ID:MultiAgent1024
About this account: we mainly study topics such as reinforcement learning, computer vision, deep learning and machine learning, and share notes and experiences from the learning process. We look forward to your attention — welcome to learn, exchange and make progress together!

Do the following:

  1. Count how many cars there are in total.
  2. Count how many parking spaces are available.
  3. Determine which parking spaces are occupied and which are not.

   Read the image:

After getting the image, we need to preprocess it: pixels whose channel values are below 120 are set to 0, and only values in the range [120, 255] are kept.

def select_rgb_white_yellow(self,image): 
    """Mask out the background, keeping only bright (white-ish) pixels.

    Pixels whose channels all lie in [120, 255] are kept; everything
    else is zeroed, which removes most of the dark background.
    """
    # Bounds for cv2.inRange: a pixel inside the range maps to 255 in
    # the mask, a pixel outside maps to 0.
    lower_bound = np.uint8([120, 120, 120])
    upper_bound = np.uint8([255, 255, 255])
    white_mask = cv2.inRange(image, lower_bound, upper_bound)
    self.cv_show('white_mask', white_mask)
    # Apply the binary mask: only the white-mask pixels survive.
    masked = cv2.bitwise_and(image, image, mask=white_mask)
    self.cv_show('masked', masked)
    return masked

                      .

     Then grayscale conversion and edge detection are performed:

   Select the valid area manually:

def select_region(self,image):
    """Manually pick a polygonal region of interest and visualize its corners.

    The polygon corners are expressed as fractions of the image size so
    the same region works at any resolution; the corners are drawn on an
    RGB copy for inspection, then the region is passed to filter_region.
    """
    rows, cols = image.shape[:2]
    # (x-fraction, y-fraction) of each polygon corner, clockwise.
    corner_fractions = [
        (0.05, 0.90),
        (0.05, 0.70),
        (0.30, 0.55),
        (0.6, 0.15),
        (0.90, 0.15),
        (0.90, 0.90),
    ]
    vertices = np.array(
        [[[cols * fx, rows * fy] for fx, fy in corner_fractions]],
        dtype=np.int32,
    )
    # Draw each corner as a red circle on an RGB copy so it is visible
    # on top of the grayscale image.
    point_img = cv2.cvtColor(image.copy(), cv2.COLOR_GRAY2RGB)
    for x, y in vertices[0]:
        cv2.circle(point_img, (x, y), 10, (0, 0, 255), 4)
    self.cv_show('point_img', point_img)
    return self.filter_region(image, vertices)

After that, the unnecessary regions are filtered out:

def filter_region(self,image, vertices):
    """Keep only the pixels of `image` that lie inside the polygon.

    Parameters
    ----------
    image : np.ndarray
        Grayscale (2-D) or color (3-D) image.
    vertices : np.ndarray
        Polygon vertices as produced by select_region, shape (1, N, 2),
        dtype int32.

    Returns
    -------
    np.ndarray
        `image` with everything outside the polygon zeroed.
    """
    mask = np.zeros_like(image)
    if len(mask.shape) == 2:
        # Grayscale: fill the polygon interior with white (255).
        cv2.fillPoly(mask, vertices, 255)
        self.cv_show('mask', mask)
    else:
        # Bug fix: the original code skipped fillPoly for multi-channel
        # input, leaving the mask all zeros so the result was an
        # all-black image. Fill every channel instead.
        cv2.fillPoly(mask, vertices, (255,) * mask.shape[2])
    return cv2.bitwise_and(image, mask)

                  

def hough_lines(self,image):
    """Detect line segments with a probabilistic Hough transform.

    The input must be the output of an edge detector. Parameter roles:
    rho / theta are the distance and angle resolution of the
    accumulator; threshold is the minimum vote count for a line;
    minLineLength discards segments shorter than it; maxLineGap merges
    collinear segments whose gap is smaller than it.
    """
    segments = cv2.HoughLinesP(
        image,
        rho=0.1,
        theta=np.pi / 10,
        threshold=15,
        minLineLength=9,
        maxLineGap=4,
    )
    return segments
def draw_lines(self,image, lines, color=[255, 0, 0], thickness=2, make_copy=True):
    """Draw the Hough segments that look like parking-slot markings.

    A segment is kept when it is nearly horizontal (|dy| <= 1) and its
    horizontal length is in the expected slot-edge range (25..55 px).

    Parameters
    ----------
    image : np.ndarray
        Image to draw on.
    lines : sequence
        Output of cv2.HoughLinesP.
    color, thickness
        Passed straight to cv2.line. (`color`'s mutable default is
        never mutated here, so it is safe.)
    make_copy : bool
        Draw on a copy instead of modifying `image` in place.

    Returns
    -------
    np.ndarray
        The image with the filtered segments drawn.
    """
    if make_copy:
        image = np.copy(image)
    cleaned = []
    for line in lines:
        for x1, y1, x2, y2 in line:
            if abs(y2 - y1) <= 1 and 25 <= abs(x2 - x1) <= 55:
                cleaned.append((x1, y1, x2, y2))
                cv2.line(image, (x1, y1), (x2, y2), color, thickness)
    # Bug fix: the original message read "No lines detected" while
    # actually printing how many lines WERE detected.
    print("Lines detected: ", len(cleaned))
    return image

def identify_blocks(self,image, lines, make_copy=True):
    """Group the detected line segments into parking columns ("lanes").

    Parameters
    ----------
    image : np.ndarray
        Image to draw the lane rectangles on.
    lines : sequence
        Output of cv2.HoughLinesP.
    make_copy : bool
        Draw on a copy of `image` instead of in place.

    Returns
    -------
    (np.ndarray, dict)
        The annotated image and a dict {lane_index: (x1, y1, x2, y2)}
        of lane bounding coordinates (x values are float averages).
    """
    # Bug fix: the original only assigned new_image under `if make_copy:`,
    # so make_copy=False raised NameError at the drawing step below.
    new_image = np.copy(image) if make_copy else image

    # Step 1: keep only near-horizontal segments of plausible slot
    # length (same filter as draw_lines).
    cleaned = [
        (x1, y1, x2, y2)
        for line in lines
        for x1, y1, x2, y2 in line
        if abs(y2 - y1) <= 1 and 25 <= abs(x2 - x1) <= 55
    ]

    # Step 2: sort segments by x1 (then y1) so segments of the same
    # column become adjacent. (Replaces the function-level
    # `import operator` + itemgetter of the original.)
    list1 = sorted(cleaned, key=lambda seg: (seg[0], seg[1]))

    # Step 3: cluster neighbouring segments; each cluster is one column
    # (row of cars) in the lot.
    clusters = {}
    dIndex = 0
    clus_dist = 10  # max x-gap (px) between segments of the same column
    for current, nxt in zip(list1, list1[1:]):
        if abs(nxt[0] - current[0]) <= clus_dist:
            clusters.setdefault(dIndex, []).append(current)
            clusters[dIndex].append(nxt)
        else:
            dIndex += 1

    # Step 4: reduce each sufficiently-populated cluster to one
    # bounding rectangle: y from the extreme segments, x averaged.
    rects = {}
    i = 0
    for segments in clusters.values():
        unique = list(set(segments))  # Step 3 appends duplicates
        if len(unique) > 5:  # ignore clusters too small to be a lane
            unique.sort(key=lambda seg: seg[1])
            avg_y1 = unique[0][1]
            avg_y2 = unique[-1][1]
            avg_x1 = sum(seg[0] for seg in unique) / len(unique)
            avg_x2 = sum(seg[2] for seg in unique) / len(unique)
            rects[i] = (avg_x1, avg_y1, avg_x2, avg_y2)
            i += 1

    print("Num Parking Lanes: ", len(rects))

    # Step 5: draw each lane rectangle, widened by a small buffer on
    # both sides.
    buff = 7
    for key in rects:
        tup_topLeft = (int(rects[key][0] - buff), int(rects[key][1]))
        tup_botRight = (int(rects[key][2] + buff), int(rects[key][3]))
        cv2.rectangle(new_image, tup_topLeft, tup_botRight, (0, 255, 0), 3)
    return new_image, rects

   Parking area divided by column:

More detailed division:

After that, a neural network is constructed to classify the image patches inside each box.

For the full code, reply "parking lot identification" to the official account.

137 original articles published, praised by 111, visited 160000+
Private letter follow

Tags: less Lambda network

Posted on Wed, 15 Jan 2020 07:29:20 -0500 by frosty1433