Skip to content

Conversation

@y-lapeyre
Copy link
Collaborator

Bugged: 2 consecutive runs on the same machine do not yield the same result ....

@tdavidcl tdavidcl added the draft label Aug 26, 2025
@tdavidcl
Copy link
Member

Aside from the rest of the issue with non-reproducibility, it should also be added to the tests in the CI.

In .github/workflows/shamrock-acpp-phys-test.yml:

  - worldsize: 1
    testfile: regression_godunov_soundwave_3d.py
  - worldsize: 2
    testfile: regression_godunov_soundwave_3d.py
+- worldsize: 1
+  testfile: regression_sph_mhd.py
+- worldsize: 2
+  testfile: regression_sph_mhd.py

@github-actions
Copy link

github-actions bot commented Nov 4, 2025

Workflow report

workflow report corresponding to commit 04331b8
Committer email is [email protected]

Pre-commit check report

Some failures were detected in the base source checks.
Check the On PR / Linting / Base source checks (pull_request) job in the tests for more detailed output.

❌ ruff-check

Found 2 errors (2 fixed, 0 remaining).

❌ trailing-whitespace

Fixing doc/sphinx/examples/tests_ci/comp_phantom_orztang.py
Fixing doc/sphinx/examples/tests_ci/mhd_orzagtang.py

❌ black

reformatted doc/sphinx/examples/tests_ci/mhd_orzagtang.py
reformatted doc/sphinx/examples/tests_ci/comp_phantom_orztang.py

All done! ✨ 🍰 ✨
2 files reformatted, 139 files left unchanged.

Suggested changes

Detailed changes :
diff --git a/doc/sphinx/examples/tests_ci/comp_phantom_orztang.py b/doc/sphinx/examples/tests_ci/comp_phantom_orztang.py
index d8880870..f040045a 100644
--- a/doc/sphinx/examples/tests_ci/comp_phantom_orztang.py
+++ b/doc/sphinx/examples/tests_ci/comp_phantom_orztang.py
@@ -8,9 +8,8 @@ This test is used to check that the Shamrock solver is able to reproduce the
 same results as Phantom.
 """
 
-import sarracen
-
 import numpy as np
+import sarracen
 
 import shamrock
 
@@ -27,12 +26,12 @@ def load_dataset(filename):
 
     print("Loading", filename)
     sdf = sarracen.read_phantom(filename)
-    tuple_of_xyz = (list(sdf['x']), list(sdf['y']), list(sdf['z']))
-    list_of_xyz  = [tuple(item) for item in zip(*tuple_of_xyz)] 
-    tuple_of_vxyz = (list(sdf['vx']), list(sdf['vy']), list(sdf['vz']))
-    list_of_vxyz  = [tuple(item) for item in zip(*tuple_of_vxyz)]
-    tuple_of_B = (list(sdf['Bx'] ), list(sdf['By']), list(sdf['Bz']))
-    list_of_B  = [tuple(item) for item in zip(*tuple_of_B)]
+    tuple_of_xyz = (list(sdf["x"]), list(sdf["y"]), list(sdf["z"]))
+    list_of_xyz = [tuple(item) for item in zip(*tuple_of_xyz)]
+    tuple_of_vxyz = (list(sdf["vx"]), list(sdf["vy"]), list(sdf["vz"]))
+    list_of_vxyz = [tuple(item) for item in zip(*tuple_of_vxyz)]
+    tuple_of_B = (list(sdf["Bx"]), list(sdf["By"]), list(sdf["Bz"]))
+    list_of_B = [tuple(item) for item in zip(*tuple_of_B)]
 
     dump = shamrock.load_phantom_dump(filename)
     dump.print_state()
@@ -48,7 +47,7 @@ def load_dataset(filename):
     cfg.set_artif_viscosity_None()
     cfg.set_IdealMHD(sigma_mhd=1, sigma_u=1)
     cfg.set_boundary_periodic()
-    cfg.set_eos_adiabatic(5./3.)
+    cfg.set_eos_adiabatic(5.0 / 3.0)
 
     model.set_solver_config(cfg)
 
@@ -59,7 +58,9 @@ def load_dataset(filename):
 
     model.init_from_phantom_dump(dump)
     ret = ctx.collect_data()
-    model.push_particle_mhd(list_of_xyz, list_of_vxyz,list(sdf['h']), list(sdf['u']), list_of_B, list(sdf['psi']))
+    model.push_particle_mhd(
+        list_of_xyz, list_of_vxyz, list(sdf["h"]), list(sdf["u"]), list_of_B, list(sdf["psi"])
+    )
 
     del model
     del ctx
@@ -112,8 +113,8 @@ def compare_datasets(istep, dataset1, dataset2):
     )
     axs[0, 1].scatter(dataset2["r"], dataset2["u"], s=smarker, c="black", rasterized=True)
     axs[1, 0].scatter(dataset2["r"], dataset2["vr"], s=smarker, c="black", rasterized=True)
-    #axs[1, 1].scatter(dataset2["r"], dataset2["alpha"], s=smarker, c="black", rasterized=True)
-#
+    # axs[1, 1].scatter(dataset2["r"], dataset2["alpha"], s=smarker, c="black", rasterized=True)
+    #
     axs[0, 0].set_ylabel(r"$\rho$")
     axs[1, 0].set_ylabel(r"$v_r$")
     axs[0, 1].set_ylabel(r"$u$")
@@ -134,7 +135,7 @@ def compare_datasets(istep, dataset1, dataset2):
     L2rho = L2diff_relat(dataset1["rho"], dataset1["xyz"], dataset2["rho"], dataset2["xyz"])
     L2u = L2diff_relat(dataset1["u"], dataset1["xyz"], dataset2["u"], dataset2["xyz"])
     L2vr = L2diff_relat(dataset1["vr"], dataset1["xyz"], dataset2["vr"], dataset2["xyz"])
-#
+    #
     print("L2r", L2r)
     print("L2rho", L2rho)
     print("L2u", L2u)
@@ -186,7 +187,7 @@ def compare_datasets(istep, dataset1, dataset2):
 
     plt.savefig("sedov_blast_phantom_comp_" + str(istep) + ".png")
 
-    #if error:
+    # if error:
     #    exit(
     #        f"Tolerances are not respected, got \n istep={istep}\n"
     #        + f" got: [{float(L2r)}, {float(L2rho)}, {float(L2u)}, {float(L2vr)}] \n"
@@ -194,6 +195,8 @@ def compare_datasets(istep, dataset1, dataset2):
     #        + f" delta : [{(L2r - expected_L2[istep][0])}, {(L2rho - expected_L2[istep][1])}, {(L2u - expected_L2[istep][2])}, {(L2vr - expected_L2[istep][3])}]\n"
     #        + f" tolerance : [{tols[istep][0]}, {tols[istep][1]}, {tols[istep][2]}, {tols[istep][3]}, {tols[istep][4]}]"
     #    )
+
+
 #
 
 step0000 = load_dataset("/Users/ylapeyre/Documents/githubrepos/Clover/orztang_00000")
@@ -202,12 +205,12 @@ step0010 = load_dataset("/Users/ylapeyre/Documents/githubrepos/Clover/orztang_00
 
 filename_start = "/Users/ylapeyre/Documents/githubrepos/Clover/orztang_00000"
 sdf = sarracen.read_phantom(filename_start)
-tuple_of_xyz = (list(sdf['x']), list(sdf['y']), list(sdf['z']))
-list_of_xyz  = [tuple(item) for item in zip(*tuple_of_xyz)] 
-tuple_of_vxyz = (list(sdf['vx']), list(sdf['vy']), list(sdf['vz']))
-list_of_vxyz  = [tuple(item) for item in zip(*tuple_of_vxyz)]
-tuple_of_B = (list(sdf['Bx'] ), list(sdf['By']), list(sdf['Bz']))
-list_of_B  = [tuple(item) for item in zip(*tuple_of_B)]
+tuple_of_xyz = (list(sdf["x"]), list(sdf["y"]), list(sdf["z"]))
+list_of_xyz = [tuple(item) for item in zip(*tuple_of_xyz)]
+tuple_of_vxyz = (list(sdf["vx"]), list(sdf["vy"]), list(sdf["vz"]))
+list_of_vxyz = [tuple(item) for item in zip(*tuple_of_vxyz)]
+tuple_of_B = (list(sdf["Bx"]), list(sdf["By"]), list(sdf["Bz"]))
+list_of_B = [tuple(item) for item in zip(*tuple_of_B)]
 dump = shamrock.load_phantom_dump(filename_start)
 dump.print_state()
 
@@ -221,7 +224,7 @@ cfg = model.gen_config_from_phantom_dump(dump)
 cfg.set_artif_viscosity_None()
 cfg.set_IdealMHD(sigma_mhd=1, sigma_u=1)
 cfg.set_boundary_periodic()
-cfg.set_eos_adiabatic(5./3.)
+cfg.set_eos_adiabatic(5.0 / 3.0)
 model.set_solver_config(cfg)
 # Print the solver config
 model.get_current_config().print_status()
@@ -229,7 +232,9 @@ model.get_current_config().print_status()
 model.init_scheduler(split, 1)
 
 model.init_from_phantom_dump(dump)
-model.push_particle_mhd(list_of_xyz, list_of_vxyz,list(sdf['h']), list(sdf['u']), list_of_B, list(sdf['psi']))
+model.push_particle_mhd(
+    list_of_xyz, list_of_vxyz, list(sdf["h"]), list(sdf["u"]), list_of_B, list(sdf["psi"])
+)
 pmass = model.get_particle_mass()
 
 
diff --git a/doc/sphinx/examples/tests_ci/mhd_orzagtang.py b/doc/sphinx/examples/tests_ci/mhd_orzagtang.py
index d16561c0..bbf9e9af 100644
--- a/doc/sphinx/examples/tests_ci/mhd_orzagtang.py
+++ b/doc/sphinx/examples/tests_ci/mhd_orzagtang.py
@@ -8,11 +8,11 @@ This test is used to check that the Shamrock solver is able to reproduce the
 same results as Phantom.
 """
 
-
-import shamrock
 import matplotlib.pyplot as plt
 import numpy as np
 
+import shamrock
+
 ################################################
 ################### PARAMETERS #################
 ################################################
@@ -27,17 +27,17 @@ nz = 12
 
 xm = -0.5
 ym = -0.5
-bmin = (-0.5, -0.5, -2*np.sqrt(6)/nx)
-bmax = (0.5, 0.5, 2*np.sqrt(6)/nx)
+bmin = (-0.5, -0.5, -2 * np.sqrt(6) / nx)
+bmax = (0.5, 0.5, 2 * np.sqrt(6) / nx)
 
 ############# initial conditions ##########
 pmass = -1
-B0 = 1. / np.sqrt(4*np.pi)
-v0 = 1.
-cs0 = 1.
-gamma = 5./3.
+B0 = 1.0 / np.sqrt(4 * np.pi)
+v0 = 1.0
+cs0 = 1.0
+gamma = 5.0 / 3.0
 M0 = v0 / cs0
-beta0 = 10./3.
+beta0 = 10.0 / 3.0
 P0 = 0.5 * B0 * B0 * beta0
 rho0 = gamma * P0 * M0
 
@@ -89,7 +89,7 @@ model.resize_simulation_box(bmin, bmax)
 model.add_cube_fcc_3d(dr, (-xs / 2, -ys / 2, -zs / 2), (xs / 2, ys / 2, zs / 2))
 
 vol_b = xs * ys * zs
-totmass = (rho0 * vol_b)
+totmass = rho0 * vol_b
 print("Total mass :", totmass)
 
 pmass = model.total_mass_to_part_mass(totmass)
@@ -105,9 +105,9 @@ def B_func(r):
     x, y, z = r
     xp = x - xm
     yp = y - ym
-    Bx = -B0 * np.sin(2*np.pi * yp) 
-    By = B0 * np.sin(4*np.pi * xp) 
-    Bz = 0.0 
+    Bx = -B0 * np.sin(2 * np.pi * yp)
+    By = B0 * np.sin(4 * np.pi * xp)
+    Bz = 0.0
     return (Bx, By, Bz)
 
 
@@ -118,9 +118,9 @@ def vel_func(r):
     x, y, z = r
     xp = x - xm
     yp = y - ym
-    vx = -v0 * np.sin(2*np.pi * yp) 
-    vy =  v0 * np.sin(2*np.pi * xp) 
-    vz = 0.0 
+    vx = -v0 * np.sin(2 * np.pi * yp)
+    vy = v0 * np.sin(2 * np.pi * xp)
+    vz = 0.0
     return (vx, vy, vz)
 
 
@@ -140,13 +140,13 @@ print("Current part mass :", pmass)
 model.set_particle_mass(pmass)
 
 
-#tot_u = pmass * model.get_sum("uint", "f64")
+# tot_u = pmass * model.get_sum("uint", "f64")
 
 model.set_cfl_cour(C_cour)
 model.set_cfl_force(C_force)
 
 t_sum = 0
-t_target = 1.
+t_target = 1.0
 i_dump = 0
 dt_dump = 0.1 * t_target
 next_dt_target = t_sum + dt_dump

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment

Labels

Projects

None yet

Development

Successfully merging this pull request may close these issues.

2 participants