@dvalters
Last active April 22, 2018 12:19
Experiments 3D map Vis. LSDMapping Tools
# Author: S. Chris Colbert <sccolbert@gmail.com>
# Copyright (c) 2009, S. Chris Colbert
# License: BSD Style
from __future__ import print_function
# this import is here because we need to ensure that matplotlib uses the
# wx backend and having regular code outside the main block is PyTaboo.
# It needs to be imported first, so that matplotlib can impose the
# version of Wx it requires.
##import wxversion
##wxversion.select('3.0')
import os
os.environ['ETS_TOOLKIT'] = 'qt4'
os.environ['QT_API'] = 'pyqt'
import matplotlib
matplotlib.use('Qt4Agg')
import pylab as pl
import numpy as np
from mayavi import mlab
from mayavi.core.ui.mayavi_scene import MayaviScene

def get_world_to_view_matrix(mlab_scene):
    """returns the 4x4 matrix that is a concatenation of the modelview transform and
    perspective transform. Takes as input an mlab scene object."""
    if not isinstance(mlab_scene, MayaviScene):
        raise TypeError('argument must be an instance of MayaviScene')
    # The VTK method needs the aspect ratio and near and far clipping planes
    # in order to return the proper transform. So we query the current scene
    # object to get the parameters we need.
    scene_size = tuple(mlab_scene.get_size())
    clip_range = mlab_scene.camera.clipping_range
    aspect_ratio = float(scene_size[0]) / float(scene_size[1])
    # this actually just gets a vtk matrix object, we can't really do anything with it yet
    vtk_comb_trans_mat = mlab_scene.camera.get_composite_projection_transform_matrix(
        aspect_ratio, clip_range[0], clip_range[1])
    # get the vtk mat as a numpy array
    np_comb_trans_mat = vtk_comb_trans_mat.to_array()
    return np_comb_trans_mat
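# Note (added): VTK composes this matrix as projection * modelview, so a
# homogeneous world point p = (x, y, z, 1) maps to clip coordinates as M . p,
# and the fourth component of the result is the perspective divisor used
# further below in the main block.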

def get_view_to_display_matrix(mlab_scene):
    """this function returns a 4x4 matrix that will convert normalized
    view coordinates to display coordinates. It's assumed that the view should
    take up the entire window and that the origin of the window is in the
    upper left corner"""
    if not isinstance(mlab_scene, MayaviScene):
        raise TypeError('argument must be an instance of MayaviScene')
    # this gets the client size of the window
    x, y = tuple(mlab_scene.get_size())
    # normalized view coordinates have the origin in the middle of the space
    # so we need to scale by width and height of the display window and shift
    # by half width and half height. The matrix accomplishes that.
    view_to_disp_mat = np.array([[x / 2.0,       0.,  0.,  x / 2.0],
                                 [      0., -y / 2.0, 0.,  y / 2.0],
                                 [      0.,       0., 1.,       0.],
                                 [      0.,       0., 0.,       1.]])
    return view_to_disp_mat
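# Worked example (added, hypothetical 640 x 480 window): with the matrix
# above, the NDC origin (0, 0) maps to the window centre (320, 240), the
# upper-left NDC corner (-1, 1) maps to pixel (0, 0), and the lower-right
# corner (1, -1) maps to (640, 480).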

def apply_transform_to_points(points, trans_mat):
    """a function that applies a 4x4 transformation matrix to an array of
    homogeneous points. The array of points should have shape Nx4"""
    if trans_mat.shape != (4, 4):
        raise ValueError('transform matrix must be 4x4')
    if points.shape[1] != 4:
        raise ValueError('point array must have shape Nx4')
    return np.dot(trans_mat, points.T).T
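# Example usage (added; the identity transform leaves homogeneous points
# unchanged):
#   apply_transform_to_points(np.array([[1., 2., 3., 1.]]), np.eye(4))
#   # -> array([[1., 2., 3., 1.]])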

if __name__ == '__main__':
    f = mlab.figure()

    N = 4

    # create a few points in 3-space (randint's upper bound is exclusive,
    # so this draws integers in the closed range [-3, 3])
    X = np.random.randint(-3, 4, N)
    Y = np.random.randint(-3, 4, N)
    Z = np.random.randint(-3, 4, N)

    # plot the points with mlab
    pts = mlab.points3d(X, Y, Z)

    # now we're going to create a single N x 4 array of our points;
    # adding a fourth column of ones expresses the world points in
    # homogeneous coordinates
    W = np.ones(X.shape)
    hmgns_world_coords = np.column_stack((X, Y, Z, W))

    # applying the first transform will give us 'unnormalized' view
    # coordinates; we also have to get the transform matrix for the
    # current scene view
    comb_trans_mat = get_world_to_view_matrix(f.scene)
    view_coords = apply_transform_to_points(hmgns_world_coords, comb_trans_mat)

    # to get normalized view coordinates, we divide through by the fourth
    # element
    norm_view_coords = view_coords / view_coords[:, 3].reshape(-1, 1)
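
    # Worked example of the perspective divide (added): a clip-space point
    # (2.0, 4.0, 6.0, 2.0) normalizes to (1.0, 2.0, 3.0, 1.0); after the
    # divide, x and y lie in the [-1, 1] range that
    # get_view_to_display_matrix expects (for points inside the view frustum).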

    # the last step is to transform from normalized view coordinates to
    # display coordinates.
    view_to_disp_mat = get_view_to_display_matrix(f.scene)
    disp_coords = apply_transform_to_points(norm_view_coords, view_to_disp_mat)

    # at this point disp_coords is an Nx4 array of homogeneous coordinates
    # where x and y are the pixel coordinates of the x and y 3D world
    # coordinates, so let's take a screenshot of the mlab view and open it
    # with matplotlib so we can check the accuracy
    img = mlab.screenshot()
    pl.imshow(img)
    for i in range(N):
        print('Point %d: (x, y) ' % i, disp_coords[i, 0:2])
        pl.plot([disp_coords[i, 0]], [disp_coords[i, 1]], 'ro')
    pl.show()

    # you should check that the printed coordinates correspond to the
    # proper points on the screen
    mlab.show()
#EOF
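
The three helper functions above chain together naturally. Below is a minimal sketch of a convenience wrapper (not part of the original gist; it assumes it is pasted into the same module as the helpers above):

def world_to_display(scene, xyz):
    """Sketch: map an Nx3 array of world coordinates to Nx2 pixel coordinates
    for the given MayaviScene, reusing the helper functions defined above."""
    xyz = np.asarray(xyz, dtype=float)
    hmgns = np.column_stack((xyz, np.ones(len(xyz))))  # homogeneous Nx4
    view = apply_transform_to_points(hmgns, get_world_to_view_matrix(scene))
    ndc = view / view[:, 3].reshape(-1, 1)              # perspective divide
    disp = apply_transform_to_points(ndc, get_view_to_display_matrix(scene))
    return disp[:, :2]                                  # pixel (x, y)

Called as world_to_display(f.scene, np.column_stack((X, Y, Z))), it should reproduce the first two columns of disp_coords printed by the example.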

Some experimental plots

from osgeo import gdal
from mayavi import mlab
import numpy as np
import numpy.ma as ma
ds = gdal.Open('/mnt/SCRATCH/Dev/ExampleTopoDatasets/indian_creek.bil')
#ds = gdal.Open('/run/media/dav/SHETLAND/Analyses/HydrogeomorphPaper/peak_flood_maps/boscastle/peak_flood/Elevations0.asc')
ds_waterd = gdal.Open('/run/media/dav/SHETLAND/Analyses/HydrogeomorphPaper/peak_flood_maps/boscastle/peak_flood/WaterDepths2400_GRIDDED_HYDRO.asc')
data_waterd = ds_waterd.ReadAsArray()
data = ds.ReadAsArray()
ndv = -9999  # nodata value used in the elevation raster
#masked_data = ma.masked_where(data == ndv, data)
# flag nodata cells in the DEM and dry cells in the water-depth grid as NaN
nodata_mask = data == ndv
data[nodata_mask] = np.nan
nowater_mask = data_waterd == 0.0
data_waterd[nowater_mask] = np.nan
mlab.figure(size=(640, 800), bgcolor=(0.16, 0.28, 0.46))
# plot the terrain and the water-depth grid in the same scene,
# using the same vertical exaggeration for both surfaces
mlab.surf(data, warp_scale=0.4, colormap='gist_gray')
mlab.surf(data_waterd, warp_scale=0.4, colormap='Blues')
mlab.show()
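
A possible refinement to the snippet above: rather than hard-coding ndv = -9999, the nodata value can be read from the raster band itself. A minimal sketch, reusing the ds and data variables from above (GetNoDataValue returns None when the file records no nodata value):

band = ds.GetRasterBand(1)       # first (and only) band of the elevation raster
ndv = band.GetNoDataValue()      # may be None if no nodata value is recorded
if ndv is None:
    ndv = -9999                  # fall back to the hard-coded value used above
nodata_mask = data == ndv
data[nodata_mask] = np.nan       # assumes a floating-point raster, as above
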
# Requires the mayavi package and its dependencies
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Mods: DAV
# Retrieve the Grand Canyon topography data ###################################
import os
# Original file:
#'ftp://e0srp01u.ecs.nasa.gov/srtm/version2/SRTM1/Region_04/N36W113.hgt.zip'
if not os.path.exists('N36W113.hgt.zip'):
    # Download the data
    try:
        from urllib import urlopen
    except ImportError:
        from urllib.request import urlopen
    print('Downloading data, please wait (10M)')
    opener = urlopen(
        'https://s3.amazonaws.com/storage.enthought.com/www/sample_data/N36W113.hgt.zip'
    )
    open('N36W113.hgt.zip', 'wb').write(opener.read())
# Load the data (signed 2 byte integers, big endian) ##########################
import zipfile
import numpy as np
# frombuffer replaces the deprecated np.fromstring for binary data
data = np.frombuffer(zipfile.ZipFile('N36W113.hgt.zip').read('N36W113.hgt'),
                     '>i2')
data.shape = (3601, 3601)
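# SRTM1 tiles cover 1 degree at 1 arc-second resolution, i.e. 3601 x 3601
# samples including the shared edge rows/columns, hence the reshape above.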
data = data.astype(np.float32)
# Plot an interesting section #################################################
from mayavi import mlab
data = data[:1000, 900:1900]
# Convert missing values into something more sensible.
data[data == -32768] = data[data > 0].min()
mlab.figure(size=(400, 320), bgcolor=(0.16, 0.28, 0.46))
mlab.surf(data, colormap='gist_earth', warp_scale=0.1,
          vmin=1200, vmax=1610)
# The data takes a lot of memory, and the surf command has created a
# copy. We free the initial memory.
del data
# A view of the canyon
mlab.view(-5.9, 83, 570, [5.3, 20, 238])
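# A possible way to keep a copy of the rendered view before the blocking
# mlab.show() call below (the output filename is an assumption, not part of
# the original example):
# mlab.savefig('grand_canyon_view.png')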
mlab.show()