
@ludvary
Created September 18, 2024 17:03
L=1024 takes an order of magnitude more time to run compared to L=512
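A plausible reason for the slowdown (an assumption on my part, since constants.hpp is not shown): if N is of order L*L attempted flips per sweep, and every accepted flip calls update_field_matrix, which touches all L*L sites, then one Monte Carlo sweep costs on the order of L^4 work. Doubling L from 512 to 1024 therefore multiplies that cost by (1024/512)^4 = 16, i.e. roughly an order of magnitude, before counting the overhead of launching an OpenMP parallel region on every accepted flip, or the fact that a 1024x1024 field_matrix of doubles (8 MB) is much less cache-friendly than the 2 MB one at L=512.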
#include <fstream>
#include <cmath>
#include <cstdlib> // for abs(int)
#include <random>
#include <array>
#include <vector>
#include <iostream>
#include <omp.h>
#include "../include/constants.hpp"
#include "../include/glauber.hpp"
#include "print.hpp"
// thin wrappers over the random-number distributions
double get_real_ran(std::mt19937& mt_rng, std::uniform_real_distribution<double>& dist_real){
    return dist_real(mt_rng);
}

int get_int_ran(std::mt19937& mt_rng, std::uniform_int_distribution<int>& dist_int){
    return dist_int(mt_rng);
}
// one Monte Carlo sweep: N single-spin-flip attempts at temperature T
void one_mcstep(std::vector<std::vector<int>>& lat, std::mt19937& mt_rng,
                std::uniform_real_distribution<double>& dist_real,
                std::uniform_int_distribution<int>& dist_int,
                double& E, int& M, const double T,
                std::vector<std::vector<double>>& field_matrix,
                const std::array<std::array<double, L>, L>& J_mat){
    // earlier per-thread RNG / OpenMP experiment, kept for reference:
    // std::mt19937 thread_rng(mt_rng() + omp_get_thread_num());
    // std::uniform_real_distribution<double> thread_dist_real(0, 1);
    // std::uniform_int_distribution<int> thread_dist_int(0, L - 1);
    // double del_E;
    // #pragma omp for collapse(1) private(del_E) reduction(+:E, M) schedule(dynamic)
    for (int m = 1; m <= N; m++){
        // pick a random site (p, q)
        int p = get_int_ran(mt_rng, dist_int);
        int q = get_int_ran(mt_rng, dist_int);

        // energy cost of flipping the spin at (p, q); the cached field_matrix
        // replaces the brute-force O(L^2) sum kept below for reference:
        // double del_E = 0;
        // for (int i = 0; i < L; i++){
        //     for (int j = 0; j < L; j++){
        //         del_E += 2*J_mat[abs(i-p)][abs(j-q)] * lat[i][j];
        //     }
        // }
        // del_E *= lat[p][q];
        double del_E = 2 * lat[p][q] * field_matrix[p][q];
        if (del_E < 0){
            // always accept a downhill move
            lat[p][q] = -lat[p][q];
            E += del_E;
            update_field_matrix(field_matrix, J_mat, lat[p][q], p, q);
            M += 2*lat[p][q];   // lat[p][q] now holds the flipped spin, so dM = 2*lat[p][q]
        }
        else{
            // accept an uphill move with probability exp(-del_E/T)
            double prob = std::exp(-del_E/T);
            double rand = get_real_ran(mt_rng, dist_real);
            if (rand < prob){
                lat[p][q] = -lat[p][q];
                E += del_E;
                update_field_matrix(field_matrix, J_mat, lat[p][q], p, q);
                M += 2*lat[p][q];   // lat[p][q] now holds the flipped spin, so dM = 2*lat[p][q]
            }
        }
    }
}
// local-field matrix: h[i][j] = sum over (k,l) of J[|i-k|][|j-l|] * s[k][l];
// an O(L^4) computation done once up front, parallelised over the (i, j) sites
std::vector<std::vector<double>> get_field_matrix(const std::array<std::array<double, L>, L>& J_mat,
                                                  const std::vector<std::vector<int>>& lattice){
    std::vector<std::vector<double>> field_matrix(L, std::vector<double>(L));
    // std::ofstream check_dat("./check1.dat");
    #pragma omp parallel for collapse(2)
    for (auto i = 0; i < L; i++){
        for (auto j = 0; j < L; j++){
            for (auto k = 0; k < L; k++){
                int abs_ik = abs(i - k);
                for (auto l = 0; l < L; l++){
                    int abs_jl = abs(j - l);
                    field_matrix[i][j] += J_mat[abs_ik][abs_jl] * lattice[k][l];
                }
            }
        }
    }
    // for (auto i = 0; i < L; i++){
    //     for (auto j = 0; j < L; j++){
    //         check_dat << field_matrix[i][j] << std::endl;
    //     }
    // }
    return field_matrix;
}
// after the spin at (p, q) has flipped to S_pq (i.e. changed by 2*S_pq),
// every local field picks up 2 * S_pq * J[|i-p|][|j-q|];
// this O(L^2) pass runs once per accepted flip
void update_field_matrix(std::vector<std::vector<double>>& field_matrix,
                         const std::array<std::array<double, L>, L>& J_mat,
                         const int S_pq, const int p, const int q){
    #pragma omp parallel for collapse(1)
    for (int i = 0; i < L; i++){
        for (int j = 0; j < L; j++){
            field_matrix[i][j] += 2 * S_pq * J_mat[abs(i-p)][abs(j-q)];
        }
    }
}
// total energy E = -(1/2) * sum over (i,j) of h[i][j] * s[i][j];
// the factor of 1/2 corrects for double counting each pair
double find_energy(const std::array<std::array<double, L>, L>& J_mat,
                   std::vector<std::vector<int>>& lattice,
                   const std::vector<std::vector<double>>& field_matrix){
    double energy = 0;
    // brute-force O(L^4) version, kept for reference:
    // #pragma omp parallel for collapse(2) reduction(+:energy)
    // for (auto l = 0; l < L; l++){
    //     for (auto m = 0; m < L; m++){
    //         for (auto i = 0; i < L; i++){
    //             for (auto j = 0; j < L; j++){
    //                 energy -= J_mat[abs(l-i)][abs(m-j)] * lattice[l][m] * lattice[i][j];
    //             }
    //         }
    //     }
    // }
    for (auto l = 0; l < L; l++){
        for (auto n = 0; n < L; n++){
            energy -= field_matrix[l][n] * lattice[l][n];
        }
    }
    return energy/2;
}
// total magnetization M = sum over all sites of s[i][j]
int find_magnetization(const std::vector<std::vector<int>>& lat){
    int m = 0;
    for (int i = 0; i < L; i++){
        for (int j = 0; j < L; j++){
            m += lat[i][j];
        }
    }
    return m;
}
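
For reference, below is a minimal driver sketch showing how the pieces above fit together; it is not part of the gist. It assumes L and N come from constants.hpp as in the includes above, while the coupling law (a made-up 1/r^2 form), the temperature, and the number of sweeps are placeholders rather than the values actually used.

// Hypothetical driver -- not from the gist. The J_mat construction, T, and the
// sweep count are placeholders; L and N are taken from constants.hpp.
int main(){
    std::mt19937 mt_rng(42);
    std::uniform_real_distribution<double> dist_real(0.0, 1.0);
    std::uniform_int_distribution<int> dist_int(0, L - 1);

    // coupling table indexed by |i-k| and |j-l|; made-up 1/r^2 decay, no self-coupling
    // (static so the L x L array of doubles does not live on the stack)
    static std::array<std::array<double, L>, L> J_mat{};
    for (int dx = 0; dx < L; dx++)
        for (int dy = 0; dy < L; dy++)
            J_mat[dx][dy] = (dx == 0 && dy == 0) ? 0.0 : 1.0 / (dx*dx + dy*dy);

    // random +/-1 initial configuration
    std::vector<std::vector<int>> lat(L, std::vector<int>(L));
    for (auto& row : lat)
        for (auto& s : row)
            s = (get_real_ran(mt_rng, dist_real) < 0.5) ? 1 : -1;

    auto field_matrix = get_field_matrix(J_mat, lat);      // O(L^4), done once
    double E = find_energy(J_mat, lat, field_matrix);
    int M = find_magnetization(lat);

    const double T = 2.0;                                  // placeholder temperature
    for (int sweep = 0; sweep < 100; sweep++){
        one_mcstep(lat, mt_rng, dist_real, dist_int, E, M, T, field_matrix, J_mat);
        std::cout << sweep << " " << E << " " << M << "\n";
    }
    return 0;
}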