Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Draft] [debug] debug 20Msun crash #753

Draft
wants to merge 11 commits into
base: main
Choose a base branch
from
10 changes: 10 additions & 0 deletions net/private/net_approx21.f90
Original file line number Diff line number Diff line change
Expand Up @@ -1683,7 +1683,17 @@ subroutine approx21_eps_info( &
xx = xx + a1*a2
end do
eps_total_q = -m3(avo,clight,clight) * xx
! XXX error here??? eps_total is really negative?
eps_total = eps_total_q
! if (n% zone >= 1816 .and. n% zone <= 1819) then
! xx = 0.0_qp
! do i=1,species(plus_co56)
! a1 = dydt(i)
! a2 = mion(i)
! xx = xx + a1*a2
! write(*,*) n% zone, eps_total, i, a1, a2, xx
! end do
! end if

fe56ec_fake_factor = eval_fe56ec_fake_factor( &
n% g% fe56ec_fake_factor, n% g% min_T_for_fe56ec_fake_factor, n% temp)
Expand Down
2 changes: 2 additions & 0 deletions net/private/net_eval.f90
Original file line number Diff line number Diff line change
Expand Up @@ -384,6 +384,8 @@ subroutine eval_net_approx21_procs(n,just_dxdt, ierr)

if (ierr /= 0) return
n% eps_nuc = n% eps_total - n% eps_neu_total
! XXX test:
!n% eps_nuc = n% eps_total

do i=1, num_isos
n% dxdt(i) = chem_isos% Z_plus_N(g% chem_id(i)) * n% dydt1(i)
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
plots/* filter=lfs diff=lfs merge=lfs -text
photos/* filter=lfs diff=lfs merge=lfs -text
45 changes: 45 additions & 0 deletions star/dev_cases_test_solver/test_20M_near_cc_approx21/README.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
.. _20M_pre_ms_to_core_collapse:

***************************
20M_pre_ms_to_core_collapse
***************************

This test suite evolves a solar-metallicity 20 |MSun| model from the pre-main sequence to core collapse.
For bit-for-bit convergence, we recommend running via the ./run_all script instead of restarting from saved models;
see https://github.com/MESAHub/mesa/issues/610.

This test_suite has been tested up to 80 solar masses, up to solar metallicity, with mass loss, and produces reasonable HR-tracks.
Note that for higher masses at solar metallicity, some combination of Pextra_factor, mass-loss, and/or superadiabatic convection reduction (e.g. mlt++)
might be necessary to stabilize the surface and avoid numerical issues. See the 80Msun_zams_to_cc test_suite as an example.

For production science we recommend adopting tighter mesh and timestep controls, such as those suggested in the comments of inlist_common.

Physical checks
===============

None

Inlists
=======

This test case has seven parts.

* Part 1 (``inlist_make_late_pre_zams``) creates a 20 |Msun|, Z=1.42*10^-2 metallicity, pre-main sequence model and evolves it for 100 years.

* Part 2 (``inlist_to_zams``) evolves the model to the zero age main sequence.

* Part 3 (``inlist_to_end_core_he_burn``) takes the model to core helium depletion.

* Part 4 (``inlist_remove_envelope``) removes the remaining hydrogen envelope. (optional)

* Part 5 (``inlist_to_end_core_c_burn``) takes the model to core carbon depletion.

* Part 6 (``inlist_to_lgTmax``) evolves the model until the core temperature reaches log T = 9.60 (approximately silicon-shell burning).

* Part 7 (``inlist_to_cc``) evolves until core collapse.




Last-Updated: 18Dec2023 by EbF

Original file line number Diff line number Diff line change
@@ -0,0 +1,174 @@
import numpy as np
from scipy.sparse import lil_matrix, csc_matrix
from scipy.sparse.linalg import splu, lsqr, lsmr, eigsh

###############################################################################
# 1) Parsing functions
###############################################################################
def parse_dblk_file(filename, nvar, nz):
    """Read the nz diagonal Jacobian blocks from *filename*.

    Expected layout: one header line, then for each zone a line starting
    with "Zone:" followed by nvar rows, each containing at least nvar
    floats (extra columns are ignored).

    Parameters
    ----------
    filename : str
        Path to the dblk dump file.
    nvar : int
        Number of variables per zone (block dimension).
    nz : int
        Number of zones (blocks) expected.

    Returns
    -------
    numpy.ndarray of shape (nz, nvar, nvar).

    Raises
    ------
    ValueError
        If a "Zone:" marker is missing, a row is too short, or fewer
        than nz blocks are found.
    """
    dblk = np.zeros((nz, nvar, nvar), dtype=np.float64)
    with open(filename, 'r') as f:
        lines = f.readlines()
    idx = 1  # skip the header line
    zone_count = 0
    while idx < len(lines) and zone_count < nz:
        zone_line = lines[idx].strip()
        idx += 1
        if not zone_line.startswith("Zone:"):
            raise ValueError(f"Expected 'Zone:' line, got: {zone_line}")
        for i in range(nvar):
            float_line = lines[idx].strip()
            idx += 1
            row_vals = float_line.split()
            if len(row_vals) < nvar:
                raise ValueError(f"Not enough floats in line: {float_line}")
            dblk[zone_count, i, :] = np.array(row_vals[:nvar], dtype=np.float64)
        zone_count += 1
    if zone_count != nz:
        # Bug fix: report the actual file name instead of the literal "(unknown)".
        raise ValueError(f"Finished parsing but did not find all {nz} zones in {filename}.")
    return dblk

def parse_ublk_file(filename, nvar, nz):
    """Read the nz-1 super-diagonal (upper) Jacobian blocks from *filename*.

    Layout: one header line, then for each of the first nz-1 zones a
    "Zone:" marker line followed by nvar rows of at least nvar floats.
    Returns an (nz, nvar, nvar) array; the last slab is left as zeros
    because there is no upper block for the final zone.
    """
    with open(filename, 'r') as f:
        lines = f.readlines()
    ublk = np.zeros((nz, nvar, nvar), dtype=np.float64)
    cursor = 1  # skip the header line
    blocks_read = 0
    while cursor < len(lines) and blocks_read < nz - 1:
        marker = lines[cursor].strip()
        cursor += 1
        if not marker.startswith("Zone:"):
            raise ValueError(f"Expected 'Zone:' line, got: {marker}")
        for row in range(nvar):
            text = lines[cursor].strip()
            cursor += 1
            fields = text.split()
            if len(fields) < nvar:
                raise ValueError(f"Not enough floats in line: {text}")
            ublk[blocks_read, row, :] = np.array(fields[:nvar], dtype=np.float64)
        blocks_read += 1
    if blocks_read != nz - 1:
        raise ValueError(f"Didn't read all upper blocks (expected {nz-1}).")
    return ublk

def parse_lblk_file(filename, nvar, nz):
    """Read the nz-1 sub-diagonal (lower) Jacobian blocks from *filename*.

    Layout: one header line, then for zones 2..nz a "Zone:" marker line
    followed by nvar rows of at least nvar floats.  Returns an
    (nz, nvar, nvar) array; slab 0 is left as zeros because the first
    zone has no lower block.
    """
    with open(filename, 'r') as f:
        lines = f.readlines()
    lblk = np.zeros((nz, nvar, nvar), dtype=np.float64)
    cursor = 1   # skip the header line
    zone_no = 1  # lower blocks start at the second zone
    while cursor < len(lines) and zone_no < nz:
        marker = lines[cursor].strip()
        cursor += 1
        if not marker.startswith("Zone:"):
            raise ValueError(f"Expected 'Zone:' line, got: {marker}")
        for row in range(nvar):
            text = lines[cursor].strip()
            cursor += 1
            fields = text.split()
            if len(fields) < nvar:
                raise ValueError(f"Not enough floats in line: {text}")
            lblk[zone_no, row, :] = np.array(fields[:nvar], dtype=np.float64)
        zone_no += 1
    if zone_no != nz:
        raise ValueError(f"Didn't read all lower blocks (expected zones up to {nz}).")
    return lblk

def parse_residuals_file(filename, nvar, nz):
    """Read a per-zone vector dump (residuals or solution) from *filename*.

    Layout: one header line, then for each zone a "Zone:" marker line
    followed by a single line holding at least nvar floats.  Returns a
    flat array of length nz*nvar, zone-major.
    """
    with open(filename, 'r') as f:
        lines = f.readlines()
    vec = np.zeros(nz * nvar, dtype=np.float64)
    cursor = 1  # skip the header line
    zones_done = 0
    while cursor < len(lines) and zones_done < nz:
        marker = lines[cursor].strip()
        cursor += 1
        if not marker.startswith("Zone:"):
            raise ValueError(f"Expected 'Zone:' line, got: {marker}")
        text = lines[cursor].strip()
        cursor += 1
        fields = text.split()
        if len(fields) < nvar:
            raise ValueError(f"Not enough floats in line: {text}")
        start = zones_done * nvar
        vec[start:start + nvar] = np.array(fields[:nvar], dtype=np.float64)
        zones_done += 1
    if zones_done != nz:
        raise ValueError(f"Did not read all {nz} zones in residuals file.")
    return vec

###############################################################################
# 2) Build the global sparse matrix
###############################################################################
def build_sparse_matrix_from_blocks(dblk, ublk, lblk, nvar, nz):
    """Assemble the global block-tridiagonal matrix from per-zone blocks.

    dblk[k] goes on the diagonal, ublk[k] on the super-diagonal (absent
    for the last zone), lblk[k] on the sub-diagonal (absent for the first
    zone).  Built in LIL format for cheap block insertion, then converted
    to CSC for factorization/solves.
    """
    size = nvar * nz
    J = lil_matrix((size, size), dtype=np.float64)
    for zone in range(nz):
        r0 = zone * nvar
        J[r0:r0 + nvar, r0:r0 + nvar] = dblk[zone]
        if zone + 1 < nz:
            J[r0:r0 + nvar, r0 + nvar:r0 + 2 * nvar] = ublk[zone]
        if zone > 0:
            J[r0:r0 + nvar, r0 - nvar:r0] = lblk[zone]
    return J.tocsc()

###############################################################################
# 3) Main routine / driver
###############################################################################
def main():
    """Driver: parse MESA solver block dumps, rebuild the global Jacobian,
    estimate its condition number, and inspect the residual of MESA's
    reported solution (zone 1817 in detail)."""
    # Input dump files produced by the instrumented MESA solver.
    file_dblk = 'dblk_output.txt'
    file_ublk = 'ublk_output.txt'
    file_lblk = 'lblk_output.txt'
    file_res = 'residuals_output.txt'
    file_sol = 'B_output.txt'

    nvar = 27   # variables per zone
    nz = 1842   # number of zones in the model

    print("Parsing block files...")
    diag_blocks = parse_dblk_file(file_dblk, nvar, nz)
    upper_blocks = parse_ublk_file(file_ublk, nvar, nz)
    lower_blocks = parse_lblk_file(file_lblk, nvar, nz)

    print("Parsing residuals...")
    rhs = parse_residuals_file(file_res, nvar, nz)
    mesa_sol = parse_residuals_file(file_sol, nvar, nz)

    print("Building sparse matrix J...")
    J = build_sparse_matrix_from_blocks(diag_blocks, upper_blocks, lower_blocks, nvar, nz)

    print("Matrix shape:", J.shape)
    print("Non-zero entries:", J.nnz)

    # Estimate cond(J) from extreme eigenvalues: largest magnitude via
    # 'LM', smallest via shift-invert around sigma=1e-8.
    # NOTE(review): eigsh assumes a symmetric/Hermitian matrix; a stellar
    # Jacobian is generally not symmetric — confirm this estimate is what
    # was intended (svds would give true singular values).
    print("\nComputing condition number...")
    ew_large, _ = eigsh(J, which='LM')
    ew_small, _ = eigsh(J, sigma=1e-8)  # <--- takes a long time
    cond = np.abs(ew_large).max() / np.abs(ew_small).min()
    print("Matrix condition number:", cond)

    # How well does MESA's solution satisfy J x = b?
    print("\nComputing MESA residual norm...")
    r_mesa = J @ mesa_sol - rhs
    rnorm_mesa = np.linalg.norm(r_mesa)
    print(f"MESA residual norm = {rnorm_mesa:e}")

    # Dump the per-variable residuals for the zone under investigation.
    zone_of_interest = 1817
    start = (zone_of_interest - 1) * nvar  # 0-based row offset of the zone
    print(f"\nResiduals for all variables in zone {zone_of_interest}:")
    for var_no, val in enumerate(r_mesa[start:start + nvar], start=1):
        print(f" Variable {var_no:2d}: {val:24.16e}")

if __name__ == "__main__":
    main()


7 changes: 7 additions & 0 deletions star/dev_cases_test_solver/test_20M_near_cc_approx21/ck
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
#!/bin/bash

# Run the standard MESA test-suite check for this test case.
# test_suite_helpers provides the definition of check_one.
source "${MESA_DIR}/star/test_suite/test_suite_helpers"

check_one
4 changes: 4 additions & 0 deletions star/dev_cases_test_solver/test_20M_near_cc_approx21/clean
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
#!/bin/bash

# Remove build artifacts for this test case.
# Guard the cd: without it, a missing make/ directory would leave us
# running `make clean` in the current (wrong) directory.
cd make || exit 1
make clean
Loading