Analyzing the Impact of Activity, Lying and Ruminating Features for Accurate Calving Prediction in Indian Cattle
Introduction

Methodology
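The real-time inference stage pairs an Intel RealSense camera with a MobileNetV2 classifier, as shown in the listing below. Each incoming colour frame is split into three horizontal segments, every segment is classified into one of three classes (Downstairs, Overground, Upstairs), and the depth at each segment's centre is read from the depth stream and overlaid on the live view.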


import cv2
import torch
import numpy as np
import pyrealsense2 as rs
from PIL import Image
from torch import nn
from torchvision import models, transforms

# Select GPU if available, otherwise fall back to CPU
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

# Load MobileNetV2 and replace the classifier head with a three-class output
# (Downstairs / Overground / Upstairs); the checkpoint is assumed to hold the
# state dict of this three-class model.
model = models.mobilenet_v2(pretrained=False)
model.classifier[1] = nn.Linear(model.last_channel, 3)
model.load_state_dict(torch.load('D:\\Documents\\Exp_Data\\datafinaldatamain.pt', map_location=DEVICE))
model.to(DEVICE)
model.eval()

# Define data transforms for the test frames (ImageNet normalisation)
data_transforms_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# Configure the RealSense pipeline with colour and depth streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)  # Enable depth stream
pipeline.start(config)

fno = 0           # frame counter
num_segments = 3  # number of horizontal bands classified per frame

try:
    while True:
        frames = pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()
        depth_frame = frames.get_depth_frame()
        if not color_frame:
            continue
        color_image = np.asanyarray(color_frame.get_data())

        # Get the width and height of the colour frame
        width, height = color_frame.get_width(), color_frame.get_height()
        segment_height = height // num_segments

        for i in range(num_segments):
            start_y = i * segment_height
            end_y = (i + 1) * segment_height
            segment = color_image[start_y:end_y, :]
            input_image = Image.fromarray(cv2.cvtColor(segment, cv2.COLOR_BGR2RGB))
            input_tensor = data_transforms_test(input_image).unsqueeze(0).to(DEVICE)

            # Calculate center coordinates of the current segment
            center_x = width // 2
            center_y = start_y + (segment_height // 2)

            # Get depth value (in metres) at the segment centre
            depth_value = depth_frame.get_distance(center_x, center_y) if depth_frame else 0.0

            # Draw a point at the center
            cv2.circle(color_image, (center_x, center_y), 3, (0, 255, 0), -1)

            # Classify the segment; softmax converts logits into a confidence score
            with torch.no_grad():
                outputs = model(input_tensor)
                probs = torch.softmax(outputs, dim=1)
                confidence, preds = torch.max(probs, 1)

            if preds.item() == 0:
                label = "Downstairs"
            elif preds.item() == 1:
                label = "Overground"
            else:
                label = "Upstairs"

            # Overlay segment boundary, class, confidence and depth
            cv2.rectangle(color_image, (0, start_y), (width, end_y), (0, 0, 0), 2)
            cv2.putText(color_image, f"Segment {i+1}", (10, start_y + 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
            cv2.putText(color_image, f"Class: {label}", (10, start_y + 60), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
            cv2.putText(color_image, f"Confidence: {confidence.item():.2f}", (10, start_y + 90), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
            cv2.putText(color_image, f"Depth: {depth_value:.4f} m", (center_x + 10, center_y), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 0), 1, cv2.LINE_AA)

        # Frame counter overlay and live display
        cv2.putText(color_image, f"Frame: {fno}", (width - 100, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1, cv2.LINE_AA)
        cv2.imshow('RealSense Prediction', color_image)
        fno += 1
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    pipeline.stop()
    cv2.destroyAllWindows()
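Dividing the frame into three equal bands yields a separate class label and confidence score for the upper, middle and lower field of view, while the depth reading at each band's centre gives the distance to that region in metres. Because the depth value is sampled at colour-pixel coordinates, keeping the two streams registered matters; one way to do this with pyrealsense2 is to align the depth frame to the colour frame before sampling. The snippet below is a minimal sketch of that step, assuming the same 640x480 pipeline configured above; get_aligned_frames is an illustrative helper, not part of the original script.

import pyrealsense2 as rs

# Map the depth frame into the colour camera's pixel space so that
# (center_x, center_y) indexes the same physical point in both images.
align = rs.align(rs.stream.color)

def get_aligned_frames(pipeline):
    # Illustrative helper: returns colour and depth frames after alignment.
    frames = pipeline.wait_for_frames()
    aligned = align.process(frames)
    return aligned.get_color_frame(), aligned.get_depth_frame()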
Results & Discussion
Conclusion & Future Work
Additional Documents
Guide: