Wrong detection after exporting the model

I have trained my model for custom object detection (single class) on Colab and then converted it to ONNX format. I had some conversion issues, so I did the export with PyTorch version 1.11.0.

When I use my .onnx model in OpenCV, I get totally wrong predictions. However, on Colab the predictions are right. I don't understand why.






"""Run single-class YOLO-style ONNX detection over a folder of PNGs with OpenCV DNN.

For each image: preprocess into a 640x640 blob, run the network, filter rows by
objectness and class score, apply NMS, and draw the surviving boxes.
"""
import cv2  # note: duplicate `import cv2` in the original removed
import numpy as np
import glob
import time

# Model input size expected by the exported ONNX network.
INPUT_SIZE = 640
# Objectness / class-score / NMS thresholds (same values as the original script).
CONF_THRESHOLD = 0.7
SCORE_THRESHOLD = 0.8
NMS_SCORE_THRESHOLD = 0.5
NMS_IOU_THRESHOLD = 0.5

# Load the network ONCE, outside the loop — the original reloaded the ONNX
# file from disk for every frame, which dominated the loop time.
net = cv2.dnn.readNetFromONNX("/home/laddu/Downloads/best.onnx")

start_time = time.time()

for path in glob.glob("/home/laddu/Desktop/Localization/dataset/loop_images/*.png"):
    img = cv2.imread(path)
    if img is None:
        # Unreadable or non-image file — skip instead of crashing later.
        continue

    current = time.time()
    loop_rate = current - start_time
    start_time = current
    if loop_rate > 0:  # guard against ZeroDivisionError on coincident timestamps
        print('Loop Rate in Hz', (1/loop_rate))

    img = cv2.resize(img, (1000, 600))
    # NOTE(review): blobFromImage stretches the 1000x600 frame to 640x640
    # with no letterboxing. If the model was trained/exported with letterbox
    # preprocessing (YOLOv5-style), this aspect-ratio distortion is a likely
    # cause of the wrong predictions — confirm against the training pipeline.
    blob = cv2.dnn.blobFromImage(img, scalefactor=1/255,
                                 size=(INPUT_SIZE, INPUT_SIZE),
                                 mean=[0, 0, 0], swapRB=True, crop=False)
    net.setInput(blob)
    # Each output row: cx, cy, w, h, objectness, then per-class scores.
    detections = net.forward()[0]

    classes_ids = []
    confidences = []
    boxes = []

    img_height, img_width = img.shape[:2]
    # Scale factors from 640x640 model coordinates back to the resized image.
    x_scale = img_width / INPUT_SIZE
    y_scale = img_height / INPUT_SIZE

    for row in detections:
        confidence = row[4]
        if confidence > CONF_THRESHOLD:
            classes_score = row[5:]
            ind = np.argmax(classes_score)
            if classes_score[ind] > SCORE_THRESHOLD:
                classes_ids.append(ind)
                # Cast to Python float: some OpenCV builds reject numpy
                # scalars in NMSBoxes' score list.
                confidences.append(float(confidence))
                cx, cy, w, h = row[:4]
                # Center/size (model coords) -> top-left/size (image coords).
                x1 = int((cx - w/2) * x_scale)
                y1 = int((cy - h/2) * y_scale)
                width = int(w * x_scale)
                height = int(h * y_scale)
                boxes.append([x1, y1, width, height])

    indices = cv2.dnn.NMSBoxes(boxes, confidences,
                               NMS_SCORE_THRESHOLD, NMS_IOU_THRESHOLD)

    # Flatten tolerates both the flat array returned by OpenCV >= 4.5.4 and
    # the nested [[i], ...] shape returned by older builds.
    for i in np.array(indices).flatten():
        x1, y1, w, h = boxes[i]
        cv2.rectangle(img, (x1, y1), (x1 + w, y1 + h), (255, 0, 0), 2)

    cv2.imshow("VIDEO", img)
    cv2.waitKey(1)

cv2.destroyAllWindows()