David Rose (@cipher982)

⌨️ Getting back in the rhythm
@cipher982
cipher982 / onnx_inference_yolov5.py
Created November 25, 2021 00:37
minimum reproducible Python example for YOLOv5 ONNX inference
# Setup imports
import os
from pathlib import Path
import cv2
from matplotlib import pyplot as plt
import numpy as np
import onnx
import onnxruntime
import torch
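
The preview stops at the imports. A minimal sketch of the rest of the pipeline, assuming a locally exported yolov5s.onnx, a test.jpg input, and the default 640x640 export size (all assumptions, not shown in the gist):

# Load the exported model and open an inference session (CPU provider).
session = onnxruntime.InferenceSession("yolov5s.onnx", providers=["CPUExecutionProvider"])
input_name = session.get_inputs()[0].name

# Preprocess: BGR -> RGB, resize to the export size, scale to [0, 1], NCHW layout.
img = cv2.imread("test.jpg")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (640, 640)).astype(np.float32) / 255.0
img = np.transpose(img, (2, 0, 1))[np.newaxis, ...]  # shape (1, 3, 640, 640)

# Run inference; a YOLOv5 export typically returns one (1, N, 85) prediction tensor.
outputs = session.run(None, {input_name: img})
print(outputs[0].shape)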
@cipher982
cipher982 / list_set_lookup_speed.py
Created December 12, 2019 19:56
List versus set lookup speed comparison
import random
import time
# Create lists of 100,000 random numbers each
random_list_1 = [random.randint(1, 10**6) for _ in range(10**5)]
random_list_2 = [random.randint(1, 10**6) for _ in range(10**5)]

start = time.time()
matching_numbers = 0
for number in random_list_1:
    if number in random_list_2:  # O(n) scan of the list for every lookup
        matching_numbers += 1
print(f"list lookups: {time.time() - start:.2f}s, {matching_numbers} matches")
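
The preview cuts off before the set half of the comparison. Per the gist's title, the counterpart presumably converts random_list_2 to a set so each membership test is O(1) on average; a sketch of that half (the exact wording is an assumption):

random_set_2 = set(random_list_2)  # one-time O(n) conversion

start = time.time()
matching_numbers = 0
for number in random_list_1:
    if number in random_set_2:  # hash lookup, O(1) on average
        matching_numbers += 1
print(f"set lookups: {time.time() - start:.2f}s, {matching_numbers} matches")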
@cipher982
cipher982 / learn_soft_update_dqn.py
Created January 3, 2019 20:30
learn and soft update steps for the DQN
def learn(self, experiences, gamma):
    """Update value parameters using given batch of experience tuples.

    Params
    ======
        experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
        gamma (float): discount factor
    """
    states, actions, rewards, next_states, dones = experiences
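
The preview ends right after unpacking the batch. The gist title covers both the learn and soft update steps, so the remainder presumably follows the standard DQN pattern sketched below; the attribute names (self.qnetwork_local, self.qnetwork_target, self.optimizer, TAU) and the torch.nn.functional as F import are assumptions, not shown in the preview:

    # Max predicted Q values for the next states, from the frozen target network.
    q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)
    # TD targets: reward plus the discounted next-state value (zero when done).
    q_targets = rewards + gamma * q_targets_next * (1 - dones)
    # Q values the local network currently assigns to the actions actually taken.
    q_expected = self.qnetwork_local(states).gather(1, actions)

    # Minimize the TD error.
    loss = F.mse_loss(q_expected, q_targets)
    self.optimizer.zero_grad()
    loss.backward()
    self.optimizer.step()

    # Soft update: blend a small fraction tau of the local weights into the target.
    self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)

def soft_update(self, local_model, target_model, tau):
    """theta_target = tau * theta_local + (1 - tau) * theta_target"""
    for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
        target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)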
@cipher982
cipher982 / sample_replay_buffer.py
Created January 3, 2019 20:22
replay buffer for pytorch DQN
def sample(self):
    """Randomly sample a batch of experiences from memory."""
    experiences = random.sample(self.memory, k=self.batch_size)

    # Stack each field across the batch and move it onto the training device.
    states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
    actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)
    rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
    next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)
    dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)

    return (states, actions, rewards, next_states, dones)
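
sample() leans on self.memory and self.batch_size, which the preview never shows. A minimal sketch of the surrounding class, assuming the common deque-plus-namedtuple layout (the layout and names are assumptions):

import random
from collections import deque, namedtuple

import numpy as np
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class ReplayBuffer:
    """Fixed-size buffer that stores experience tuples for off-policy learning."""

    def __init__(self, buffer_size, batch_size, seed):
        self.memory = deque(maxlen=buffer_size)  # oldest experiences fall off the left
        self.batch_size = batch_size
        self.experience = namedtuple("Experience", ["state", "action", "reward", "next_state", "done"])
        random.seed(seed)

    def add(self, state, action, reward, next_state, done):
        """Append one transition to memory."""
        self.memory.append(self.experience(state, action, reward, next_state, done))

    def __len__(self):
        return len(self.memory)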
@cipher982
cipher982 / QNetwork.py
Created January 3, 2019 20:07
QNetwork
class QNetwork(nn.Module):
    """Actor (Policy) Model."""

    def __init__(self, state_size, action_size, seed, fc1_units=64, fc2_units=64):
        """Initialize parameters and build model.

        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            fc1_units (int): Nodes in the first hidden layer
            fc2_units (int): Nodes in the second hidden layer
        """
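
The preview cuts off inside the docstring, before any layers are defined. The constructor signature suggests a two-hidden-layer MLP; a sketch along those lines (the layer structure and forward pass are assumptions, not shown in the preview):

import torch
import torch.nn as nn
import torch.nn.functional as F

class QNetwork(nn.Module):
    """Maps a state vector to one Q value per action."""

    def __init__(self, state_size, action_size, seed, fc1_units=64, fc2_units=64):
        super().__init__()
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(state_size, fc1_units)
        self.fc2 = nn.Linear(fc1_units, fc2_units)
        self.fc3 = nn.Linear(fc2_units, action_size)

    def forward(self, state):
        x = F.relu(self.fc1(state))
        x = F.relu(self.fc2(x))
        return self.fc3(x)  # raw Q values, one per action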
// define the (x,y) points we will use for the planner
vector<double> next_x_vals;
vector<double> next_y_vals;

// start with previous path points from above
for (int i = 0; i < prev_size; i++)
{
    next_x_vals.push_back(previous_path_x[i]);
    next_y_vals.push_back(previous_path_y[i]);
}

// calculate how to break up spline points to travel at desired reference velocity
// create a spline
tk::spline s;

// set (x,y) points to the spline
s.set_points(ptsx, ptsy);
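
For readers without the tk::spline header, the same idea in Python: fit a spline through a few anchor points, then space the samples so that one point per 0.02 s simulator tick yields the desired speed. A sketch with illustrative numbers (none of these values come from the gist):

import numpy as np
from scipy.interpolate import CubicSpline

# Anchor points in the car's local frame (illustrative values).
ptsx = np.array([0.0, 30.0, 60.0, 90.0])
ptsy = np.array([0.0, 0.5, 1.0, 1.5])
s = CubicSpline(ptsx, ptsy)

ref_vel = 49.5 * 0.44704                  # target speed, mph -> m/s
target_x = 30.0
target_dist = np.hypot(target_x, s(target_x))
n_ticks = target_dist / (0.02 * ref_vel)  # ticks needed to cover target_dist

xs = np.arange(1, 51) * (target_x / n_ticks)  # 50 evenly spaced x samples
ys = s(xs)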

if (car_ahead)
{
    cout << "Car Ahead!!!" << endl;

    if (!car_left && lane > 0) // no left-car, yes left-lane
    {
        lane--; // Change lane left
    }
    else if (!car_right && lane != 2) // no right-car, yes right-lane
    {
        lane++; // Change lane right
    }
}

// estimate the other car's longitudinal position (s) at the end of our previous path
check_car_s += ((double)prev_size * 0.02 * check_speed);

if (car_lane == lane) // if the other car is in 'lane' (our lane)
{
    // is it within 30m ahead of us?
    car_ahead |= check_car_s > car_s && check_car_s - car_s < 30;
}
else if (car_lane - lane == -1) // if left of us
{
    // is it within 30m of us in either direction?
    car_left |= car_s - 30 < check_car_s && check_car_s < car_s + 30;
}

if (d > 0 && d < 4) // d represents location in meters from the left-most edge
{
    car_lane = 0; // 0-4 meters = lane 0 (left lane)
}
else if (d > 4 && d < 8)
{
    car_lane = 1; // middle lane
}
else if (d > 8 && d < 12)
{
    car_lane = 2; // right lane
}
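
Restating the bookkeeping above as two small Python helpers may make the logic easier to scan; the names are mine, not from the gist:

def lane_from_d(d, lane_width=4.0):
    """Map Frenet d (meters from the left road edge) to a lane index 0-2."""
    if 0.0 < d < 12.0:
        return int(d // lane_width)
    return None  # off the road or on the oncoming side

def flags_for_car(check_car_s, car_s, car_lane, lane, gap=30.0):
    """Return (ahead, left, right) flags for one sensed car."""
    beside = abs(check_car_s - car_s) < gap
    ahead = car_lane == lane and car_s < check_car_s < car_s + gap
    left = car_lane == lane - 1 and beside
    right = car_lane == lane + 1 and beside
    return ahead, left, right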