2024.05.18 22:06 Duemellon Custom Node issue... AI generated code
RETURN_TYPES = ("IMAGE", "IMAGE", "MASK")
RETURN_NAMES = ("original_image", "cropped_image", "mask")
FUNCTION = "load_image"
CATEGORY = "image"

def load_image(self, image, crop_size_mult, bbox_smooth_alpha):
    """Load an image (with optional alpha mask) and crop it around the mask's bbox.

    Returns a tuple (original_image, cropped_image, mask):
      original_image -- (B, H, W, C) float tensor in [0, 1]
      cropped_image  -- (1, h, w, C) crop centered on the mask, resized/center-cropped
      mask           -- (B, H, W) inverted-alpha mask (zeros if no alpha channel)

    crop_size_mult scales the detected bounding box; bbox_smooth_alpha blends the
    bbox size/center with the previous frame's values (0 = keep previous).
    """
    image_path = folder_paths.get_annotated_filepath(image)
    img = node_helpers.pillow(Image.open, image_path)

    output_images = []
    output_masks = []
    w, h = None, None
    excluded_formats = ['MPO']  # MPO frames are near-duplicates; keep only the first

    for i in ImageSequence.Iterator(img):
        i = node_helpers.pillow(ImageOps.exif_transpose, i)
        if i.mode == 'I':
            # 32-bit integer images: rescale into 0..255 before RGB conversion
            i = i.point(lambda i: i * (1 / 255))
        frame = i.convert("RGB")

        if len(output_images) == 0:
            w, h = frame.size
        if frame.size != (w, h):
            # skip frames whose size differs from the first frame
            continue

        frame = np.array(frame).astype(np.float32) / 255.0
        frame = torch.from_numpy(frame)[None,]
        if 'A' in i.getbands():
            # mask = inverted alpha channel
            mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
            mask = 1. - torch.from_numpy(mask)
        else:
            mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
        output_images.append(frame)
        output_masks.append(mask.unsqueeze(0))

    if len(output_images) > 1 and img.format not in excluded_formats:
        original_image = torch.cat(output_images, dim=0)
        original_mask = torch.cat(output_masks, dim=0)
    else:
        original_image = output_images[0]
        original_mask = output_masks[0]

    # --- BatchCropFromMask logic ---
    masks = original_mask
    self.max_bbox_width = 0
    self.max_bbox_height = 0

    # Bounding box of the non-zero region of the first mask in the batch.
    # NOTE(review): if the mask is all-zero, np.min/np.max on an empty axis
    # raises — upstream presumably guarantees a non-empty mask; confirm.
    _mask = ToPILImage()(masks[0])
    non_zero_indices = np.nonzero(np.array(_mask))
    min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1])
    min_y, max_y = np.min(non_zero_indices[0]), np.max(non_zero_indices[0])
    curr_max_bbox_width = max_x - min_x
    curr_max_bbox_height = max_y - min_y

    # Smooth the changes in the bounding box size across calls.
    self.max_bbox_width = self.smooth_bbox_size(self.max_bbox_width, curr_max_bbox_width, bbox_smooth_alpha)
    self.max_bbox_height = self.smooth_bbox_size(self.max_bbox_height, curr_max_bbox_height, bbox_smooth_alpha)

    # Apply the crop size multiplier.  Clamp to >= 1 so the aspect-ratio
    # division and the Resize below cannot hit zero (degenerate 1-pixel-thin
    # masks previously raised ZeroDivisionError).
    self.max_bbox_width = max(1, round(self.max_bbox_width * crop_size_mult))
    self.max_bbox_height = max(1, round(self.max_bbox_height * crop_size_mult))
    bbox_aspect_ratio = self.max_bbox_width / self.max_bbox_height

    # Smoothed center of the mask region.
    center_x = np.mean(non_zero_indices[1])
    center_y = np.mean(non_zero_indices[0])
    curr_center = (round(center_x), round(center_y))
    if not hasattr(self, 'prev_center'):
        self.prev_center = curr_center
    center = self.smooth_center(self.prev_center, curr_center, bbox_smooth_alpha)
    self.prev_center = center

    # Clamp the crop window to the image bounds.
    # BUG FIX: IMAGE tensors are (batch, height, width, channels), so the x
    # bound is shape[2] and the y bound is shape[1]; the original clamped x
    # to shape[1] (height) and y to shape[0] (batch size).
    half_box_width = round(self.max_bbox_width / 2)
    half_box_height = round(self.max_bbox_height / 2)
    min_x = max(0, center[0] - half_box_width)
    max_x = min(original_image.shape[2], center[0] + half_box_width)
    min_y = max(0, center[1] - half_box_height)
    max_y = min(original_image.shape[1], center[1] + half_box_height)

    # Crop the first image in the batch to the bounding box.
    cropped_img = original_image[0, min_y:max_y, min_x:max_x, :]

    # Resize keeping the bbox aspect ratio, then center-crop to the target size.
    new_height = min(cropped_img.shape[0], self.max_bbox_height)
    new_width = round(new_height * bbox_aspect_ratio)
    resized_img = Resize((new_height, new_width))(cropped_img.permute(2, 0, 1))
    cropped_resized_img = CenterCrop((self.max_bbox_height, self.max_bbox_width))(resized_img)
    cropped_image = cropped_resized_img.permute(1, 2, 0).unsqueeze(0)

    return (original_image, cropped_image, original_mask)

def smooth_bbox_size(self, prev_bbox_size, curr_bbox_size, alpha):
    """Exponentially blend the new bbox size with the previous one (alpha=0 keeps the old size)."""
    if alpha == 0:
        return prev_bbox_size
    return round(alpha * curr_bbox_size + (1 - alpha) * prev_bbox_size)

def smooth_center(self, prev_center, curr_center, alpha=0.5):
    """Exponentially blend the new (x, y) center with the previous one (alpha=0 keeps the old center)."""
    if alpha == 0:
        return prev_center
    return (
        round(alpha * curr_center[0] + (1 - alpha) * prev_center[0]),
        round(alpha * curr_center[1] + (1 - alpha) * prev_center[1]),
    )

@classmethod
def IS_CHANGED(s, image):
    """SHA-256 of the file contents; ComfyUI re-runs the node when this changes."""
    image_path = folder_paths.get_annotated_filepath(image)
    m = hashlib.sha256()
    with open(image_path, 'rb') as f:
        m.update(f.read())
    return m.digest().hex()

@classmethod
def VALIDATE_INPUTS(s, image):
    """Return True when the annotated filepath exists, else an error string."""
    if not folder_paths.exists_annotated_filepath(image):
        return "Invalid image file: {}".format(image)
    # BUG FIX: the paste fused this return with the mapping below into the
    # syntax error `return TrueNODE_CLASS_MAPPINGS = {...}`.
    return True

NODE_CLASS_MAPPINGS = {
    "LoadImageAndCrop": LoadImageAndCrop
}
2024.05.17 20:16 doodler_tech Java Mocha 4.5.8 Distance in Kilometers
2024.05.16 23:50 Hel-Low5302 Can anyone help me figure out why my simulation data is not going through the output port?
// ---------------------------------------------------------------------------
// Scalar Kalman-filter datapath (fragment).  The module ports, `clk`,
// `counter`, `counter1024`, `counter_eq_1024`, the filter constants
// (k_omega, oT, phi, phi_sq, d_var, s_var, e_pre_var0) and the Divider
// Generator interface (K_next_in, M_AXIS_OUT_*) are declared outside this
// excerpt.
//
// NOTE(review): `x_next1` (read at counter == 2), `K_next_num`,
// `K_next_denom`, `M_AXIS_OUT_tvalid_kal`, `M_AXIS_OUT_tdata` and `x_data`
// are used here but not declared in this excerpt.  If any of them is a
// module output declared as a plain `wire`, a procedural assignment from
// this always block will not drive the port — a likely cause of "simulation
// data not going through the output port".  Confirm they are `reg` /
// `output reg`.
// NOTE(review): this posedge-clocked block uses blocking (=) assignments;
// nonblocking (<=) is the usual convention for sequential logic and avoids
// simulation/synthesis ordering mismatches — verify intent.
// ---------------------------------------------------------------------------
reg [31:0] y, y_init, u;
reg [31:0] x_curr, x_curr0, x_curr1, x_curr2, x_next, x_next2, x_next3;
reg [31:0] e_pre_var, e_next_var, e_next_var1, e_next_var2;
reg [31:0] K_pre, K_next, one_K_next_sq, K_next_sq; // kalman gain

// Zero every state register at time 0 (simulation initial state).
initial begin
    y_init = 32'b0;
    x_curr = 32'b0;
    x_curr0 = 32'b0;
    x_curr1 = 32'b0;
    x_curr2 = 32'b0;
    x_next = 32'b0;
    x_next2 = 32'b0;
    x_next3 = 32'b0;
    e_pre_var = 32'b0;
    e_next_var = 32'b0;
    e_next_var1 = 32'b0;
    e_next_var2 = 32'b0;
    K_pre = 32'b0;
    K_next = 32'b0;
    one_K_next_sq = 32'b0;
    K_next_sq = 32'b0;
end

// One Kalman update, sequenced across counter phases so each arithmetic
// step gets its own clock cycle.
always@(posedge clk) begin
    // --- one-time initialization phases (first sample) ---
    if (counter == 0) begin
        y_init = y_measured;
    end
    if (counter == 1) begin
        x_next2 = k_omega*y_init;
        x_next3 = oT*x_next2;
    end
    if (counter == 2) begin
        // NOTE(review): `x_next1` is never assigned anywhere in this excerpt;
        // the initial prediction therefore uses an undefined value — confirm
        // whether `x_next2` (or another register) was intended.
        x_next = x_next1 - x_next3; // predict the next state x_n
        e_pre_var = e_pre_var0; // make sure this is a positive number
    end
    // --- steady-state phases, once per 1024-cycle sample period ---
    if (counter_eq_1024) begin
        y = y_measured;
        u = -oT*k_omega*y;
    end
    if (counter1024 == 1) begin
        K_next_num = (phi_sq*e_pre_var + d_var);
        K_next_denom = (phi_sq*e_pre_var + d_var + s_var);
        // K_next = K_next_num / K_next_denom;
        M_AXIS_OUT_tvalid_kal = 1'b1; // send the numerator and denominator values to the Divider Generator
        // NOTE(review): K_next_in is sampled on the same edge tvalid is
        // asserted — the external divider cannot have produced the quotient
        // yet; verify the divider's latency/handshake.
        K_next = K_next_in;
    end
    if (counter1024 == 2) begin
        x_curr0 = 1-K_next;
        x_curr1 = x_curr0*x_next;
        x_curr2 = K_next*y;
    end
    if (counter1024 == 3) begin
        x_curr = x_curr1 + x_curr2; // estimate the current state
        x_next = phi*x_curr + u; // predict the next state
    end
    if (counter1024 == 4) begin
        //e_next_var = (1-K_next)*(1-K_next)*(phi*phi*e_pre_var + d_var) + K_next*K_next*s_var;
        one_K_next_sq = (1-K_next)*(1-K_next);
        K_next_sq = K_next*K_next;
    end
    if(counter1024 == 5) begin
        e_next_var1 = one_K_next_sq*K_next_num;
        e_next_var2 = K_next_sq*s_var;
        e_next_var = e_next_var1 + e_next_var2;
    end
    if (counter1024 == 6) begin
        // make the next values the old values for next cycle
        K_pre = K_next;
        e_pre_var = e_next_var;
        // Assign the calculated value to the output signal
        M_AXIS_OUT_tdata = x_next;
        // Store sampled data in memory
        x_data = x_next;
        M_AXIS_OUT_tvalid_kal = 1'b0; // stop data flow to Divider Generator
    end
end
2024.05.15 18:50 Spooker0 Grass Eaters 52 Just Passing Through
2024.05.15 05:20 kayenano The Villainess Is An SS+ Rank Adventurer: Chapter 239
2024.05.14 12:02 KingAroan [Help] - External monitors not recognized in Wayland or X11 after upgrade
May 14 10:46:41 redacted kwin_wayland[4513]: kf.svg: The theme "Sweet" uses the legacy metadata.desktop. Consider contacting the author and asking them update it to use the newer JSON format. May 14 10:46:45 redacted plasmashell[4702]: KPackageStructure of KPluginMetaData(pluginId:"org.kde.plasma.simplemenu", fileName: "/usshare/plasma/plasmoids/org.kde.plasma.simplemenu/metadata.json") doe> May 14 10:46:46 redacted systemd[4438]: Started Display Configuration. May 14 10:46:46 redacted kded6[4665]: org.kde.plasma.appmenu: Got an error May 14 10:46:46 redacted kded6[4665]: org.kde.plasma.appmenu: Got an error May 14 10:46:47 redacted systemsettings[10617]: file:///uslib/qt6/qml/org/kde/kirigami/Dialog.qml:334:18: QML ScrollView: Binding loop detected for property "calculatedImplicitWidth" May 14 10:46:47 redacted systemsettings[10617]: qrc:/kcm/kcm_kscreen/main.qml:39:5: QML OverlaySheet: Binding loop detected for property "implicitHeight" May 14 10:46:47 redacted systemsettings[10617]: qrc:/kcm/kcm_kscreen/main.qml:39:5: QML OverlaySheet: Binding loop detected for property "implicitHeight" May 14 10:46:47 redacted systemsettings[10617]: qrc:/qt/qml/org/kde/systemsettings/CategoryItem.qml:33:13: Unable to assign IconPropertiesGroup_QMLTYPE_76 to IconPropertiesGroup_QMLTYPE_76 May 14 10:46:47 redacted systemsettings[10617]: qrc:/qt/qml/org/kde/systemsettings/CategoryItem.qml:33:13: Unable to assign IconPropertiesGroup_QMLTYPE_76 to IconPropertiesGroup_QMLTYPE_76 May 14 10:46:47 redacted systemsettings[10617]: qrc:/kcm/kcm_kscreen/main.qml:39:5: QML OverlaySheet: Binding loop detected for property "implicitWidth"I am going crazy and tried to revert back to X11 but the monitors don't work. I can actually see them when running xrandr but the system settings won't allow me to enable them again. I am not sure how to fix this.
2024.05.14 08:10 SolarSolutionCompany What Is The Power Output Of A Solar Panel
Read more: The Best Time to Install Solar Panels: A Strategic Guide for Savings and Sunshine
2024.05.14 05:34 RobotDragon0 PIC24: Trouble with converting between TMR value and seconds
#include "xc.h"
/* The paste fused the second include with an "Edit:" caption; reconstructed
 * as <stdbool.h> since `bool` is used below. */
#include <stdbool.h>

/* HC-SR04-style ultrasonic ranging on a PIC24 (16 MHz instruction clock):
 * Timer3 (1:256 prescaler) timestamps the echo pulse via Input Capture 1;
 * main() periodically emits a ~10 us trigger pulse on RB5. */

bool stopMotion = 0;                               /* set when an object is within distanceThreshold */
volatile unsigned long int currTime = 0;           /* capture timestamp of the echo rising edge (ticks) */
volatile unsigned long int finalTime = 0;          /* echo pulse width (see ISR) */
volatile unsigned long int overflowtmr = 0;        /* number of Timer3 wraps since last reset */
volatile unsigned long int distanceThreshold = 1;  /* stop distance, cm */

void setup(){
    CLKDIVbits.RCDIV = 0;
    TRISBbits.TRISB4 = 1; // input for echo
    TRISBbits.TRISB5 = 0; // output for trig
    LATBbits.LATB5 = 0;
    // IC1 setup
    IC1CONbits.ICTMR = 0; // timer 3 is the capture time base
    IC1CONbits.ICM = 1;   // capture every edge; the ISR reads RB4 to tell rising from falling
    // PPS for IC1
    __builtin_write_OSCCONL(OSCCON & 0xbf); // unlock PPS
    RPINR7bits.IC1R = 4;                    // RP4 (pin 11)
    /* BUG FIX: the paste dropped the '|' — `OSCCON 0x40` is a syntax error;
     * setting the IOLOCK bit requires OSCCON | 0x40. */
    __builtin_write_OSCCONL(OSCCON | 0x40); // lock PPS
    T3CON = 0;
    TMR3 = 0;
    T3CONbits.TCKPS = 3; // 1:256 prescaler -> one tick = 256/16 MHz = 16 us
    _T3IF = 0;
    PR3 = 65535;
    T3CONbits.TON = 1;
    IFS0bits.T3IF = 0;
    IEC0bits.T3IE = 1;
    IPC2bits.T3IP = 5;
    IFS0bits.IC1IF = 0;
    IEC0bits.IC1IE = 1;
    IPC0bits.IC1IP = 5;
}

void __attribute__((interrupt, auto_psv)) _IC1Interrupt(){
    IFS0bits.IC1IF = 0;
    if(PORTBbits.RB4 == 1){
        /* rising edge: remember the start time (65536UL forces long math) */
        currTime = TMR3 + 65536UL*overflowtmr;
    }
    else{
        /* falling edge: pulse width in timer ticks */
        finalTime = TMR3 + 65536UL*overflowtmr;
        overflowtmr = 0;
        TMR3 = 0;
        /* BUG FIX: the original computed
         *     finalTime = (finalTime - currTime)*(256)/(16000000);
         * in integer arithmetic, which converts ticks to whole SECONDS and
         * truncates to 0 for any realistic echo, so distance was always 0.
         * One tick is 256/16 MHz = 16 us, so the echo width in microseconds
         * is ticks*16; distance_cm = us/58 (HC-SR04 datasheet constant). */
        finalTime = (finalTime - currTime) * 16; /* microseconds */
        volatile unsigned long int distance = finalTime/58;
        if(distance <= distanceThreshold)
            stopMotion = 1;
        else
            stopMotion = 0;
    }
}

/* Count Timer3 wraps so captures longer than one timer period stay correct. */
void __attribute__((interrupt, auto_psv)) _T3Interrupt(){
    IFS0bits.T3IF = 0;
    overflowtmr++;
}

/* Busy-wait delays calibrated for a 16 MHz instruction clock. */
void delay_ms(unsigned int ms){
    while(ms-- > 0){
        asm("repeat #15999");
        asm("nop");
    }
}

void delay_10us(void){
    int i = 10;
    while(i-- > 0){
        asm("repeat #3");
        asm("nop");
    }
}

/* ~10 us high pulse on the trigger pin. */
void sendTrig(){
    LATBbits.LATB5 = 1;
    delay_10us();
    LATBbits.LATB5 = 0;
}

int main(void) {
    setup();
    while(1){
        sendTrig();
        delay_ms(2000);
    }
}
#include "xc.h"
/* NOTE(review): the paste fused this include with the post's "Edit:" caption
 * below — the real header name (likely <stdbool.h>, since `bool` is used)
 * was lost; restore it before compiling. */
#includeEdit: Updated code for calculating distance by using time in units of a 10th of a nanosecond:
/* Second revision: double-precision timing, Timer1 generates the 10 us
 * trigger pulse instead of a busy-wait. */
bool stopMotion = 0;
volatile double finalTime = 0;                     /* echo pulse width (see ISR) */
volatile unsigned long int overflowtmr = 0;        /* Timer3 wrap count */
volatile unsigned long int distanceThreshold = 1;  /* stop distance, cm */

void setup(){
    CLKDIVbits.RCDIV = 0;
    TRISBbits.TRISB4 = 1; //input for echo
    TRISBbits.TRISB5 = 0; //output for trig
    LATBbits.LATB5 = 0;
    //IC1 setup
    IC1CONbits.ICTMR = 0; //timer 3
    IC1CONbits.ICM = 1;   // capture every edge; the ISR reads RB4 to tell rising from falling
    //PPS for IC1
    __builtin_write_OSCCONL(OSCCON & 0xbf);// unlock PPS
    RPINR7bits.IC1R = 4; // RP4 (pin 11)
    /* NOTE(review): `OSCCON 0x40` is a syntax error — the '|' was almost
     * certainly lost in the paste; should read OSCCON | 0x40. */
    __builtin_write_OSCCONL(OSCCON 0x40); // lock PPS
    T3CON = 0;
    TMR3 = 0;
    T3CONbits.TCKPS = 3;  /* 1:256 prescaler */
    _T3IF = 0;
    PR3 = 65535;
    T3CONbits.TON = 1;
    IFS0bits.T3IF = 0;
    IEC0bits.T3IE = 1;
    IPC2bits.T3IP = 3;
    IFS0bits.IC1IF = 0;
    IEC0bits.IC1IE = 1;
    IPC0bits.IC1IP = 3;
    //TIMER1 setup
    T1CON = 0;
    TMR1 = 0;
    T1CONbits.TCKPS = 0;
    _T1IF = 0;
    PR1 = 160; //for a delay of 10uS
    T1CONbits.TON = 0;
    IFS0bits.T1IF = 0;
    IEC0bits.T1IE = 1;
    IPC0bits.T1IP = 5; //higher priority than the input capture interrupt
}

void __attribute__((interrupt, auto_psv)) _IC1Interrupt(){
    IFS0bits.IC1IF = 0;
    if(PORTBbits.RB4 == 1){
        /* rising edge: restart the measurement window.
         * NOTE(review): `overflowtmr` is NOT cleared here, so a wrap counted
         * before the rising edge inflates the measured width — verify. */
        TMR3 = 0;
    }
    else{
        /* falling edge: pulse width in timer ticks (double math) */
        finalTime = (double)(TMR3) + 65536*overflowtmr;
        overflowtmr = 0;
        TMR3 = 0;
        /* NOTE(review): ticks*256/16e6 converts to SECONDS, but the /58
         * below expects MICROSECONDS (HC-SR04: 58 us per cm) — the computed
         * distance looks ~1e6 times too small; confirm intended units. */
        finalTime = (finalTime)*(256)/(16000000);
        double distance = finalTime/58;
        if(distance <= distanceThreshold)
            stopMotion = 1;
        else
            stopMotion = 0;
    }
}

/* Count Timer3 wraps so long echoes stay measurable. */
void __attribute__((interrupt, auto_psv)) _T3Interrupt(){
    IFS0bits.T3IF = 0;
    overflowtmr++;
}

/* Timer1 period (10 us) ends the trigger pulse started by sendTrig(). */
void __attribute__((interrupt, auto_psv)) _T1Interrupt(){
    _T1IF = 0;
    LATBbits.LATB5 = 0;
}

/* Raise trig and start Timer1; the T1 ISR drops trig after ~10 us.
 * NOTE(review): T1CONbits.TON is never cleared, so Timer1 free-runs and the
 * ISR keeps firing; and main() below calls sendTrig() back-to-back with no
 * delay between pings — verify against the sensor's required idle time. */
void sendTrig(){
    LATBbits.LATB5 = 1;
    T1CONbits.TON = 1;
}

int main(void) {
    setup();
    while(1){
        sendTrig();
    }
}
#include "xc.h"
/* NOTE(review): the paste fused this include with the following declaration
 * ("#includebool") — the real header name (likely <stdbool.h>/<stdint.h>,
 * since bool/uint32_t/uint16_t are used) was lost; restore before compiling. */
#include
/* Third revision: integer timing in units of 0.1 ns (1 tick @ 16 MHz,
 * prescaler 1:1 = 62.5 ns = 625 such units). */
bool stopMotion = 0;
volatile uint32_t finalTime = 0;   /* echo pulse width, 0.1 ns units */
volatile int overflowtmr = 0;      /* Timer3 wrap count */
int distanceThreshold = 1;         /* stop distance, cm */

void setup(){
    CLKDIVbits.RCDIV = 0;
    TRISBbits.TRISB4 = 1; //input for echo
    TRISBbits.TRISB5 = 0; //output for trig
    LATBbits.LATB5 = 0;
    //IC1 setup
    IC1CONbits.ICTMR = 0; //timer 3
    IC1CONbits.ICM = 1;   // capture every edge; the ISR reads RB4 to tell rising from falling
    //PPS for IC1
    __builtin_write_OSCCONL(OSCCON & 0xbf);// unlock PPS
    RPINR7bits.IC1R = 4; // RP4 (pin 11)
    /* NOTE(review): `OSCCON 0x40` is a syntax error — the '|' was almost
     * certainly lost in the paste; should read OSCCON | 0x40. */
    __builtin_write_OSCCONL(OSCCON 0x40); // lock PPS
    T3CON = 0;
    TMR3 = 0;
    T3CONbits.TCKPS = 0;
    _T3IF = 0; //a prescaler of 1 for TMR3
    PR3 = 65535;
    T3CONbits.TON = 1;
    IFS0bits.T3IF = 0;
    IEC0bits.T3IE = 1;
    IPC2bits.T3IP = 3;
    IFS0bits.IC1IF = 0;
    IEC0bits.IC1IE = 1;
    IPC0bits.IC1IP = 3;
    //TIMER1 setup
    T1CON = 0;
    TMR1 = 0;
    T1CONbits.TCKPS = 0;
    _T1IF = 0;
    PR1 = 160; //for a delay of 10uS
    T1CONbits.TON = 0;
    IFS0bits.T1IF = 0;
    IEC0bits.T1IE = 1;
    IPC0bits.T1IP = 5; //higher priority than the input capture interrupt
}

void __attribute__((interrupt, auto_psv)) _IC1Interrupt(){
    IFS0bits.IC1IF = 0;
    if(PORTBbits.RB4 == 1){
        //reset both TMR3 and overflowtmr
        TMR3 = 0;
        overflowtmr = 0;
    }
    else{
        /* Pulse width in 0.1 ns units: one tick = 62.5 ns = 625 units.
         * NOTE(review): on PIC24 `int` is 16-bit, so (TMR3)*625 and
         * overflowtmr*625 overflow BEFORE being widened to uint32_t for any
         * echo longer than ~52 ticks — cast to (uint32_t) first; verify.
         * NOTE(review): 58 us/cm is 580000 units of 0.1 ns, but the divisor
         * below is 58*(10000/10) = 58000 — distance looks 10x too large;
         * confirm the intended scaling. */
        finalTime = (TMR3)*625 + overflowtmr*625*65536;
        overflowtmr = 0;
        TMR3 = 0;
        uint16_t distance = finalTime/((58)*(10000/10));
        if(distance <= distanceThreshold)
            stopMotion = 1;
        else
            stopMotion = 0;
    }
}

/* Count Timer3 wraps so long echoes stay measurable. */
void __attribute__((interrupt, auto_psv)) _T3Interrupt(){
    IFS0bits.T3IF = 0;
    overflowtmr++;
}

/* Timer1 period (10 us) ends the trigger pulse started by sendTrig(). */
void __attribute__((interrupt, auto_psv)) _T1Interrupt(){
    _T1IF = 0;
    LATBbits.LATB5 = 0;
}

/* Raise trig and start Timer1; the T1 ISR drops trig after ~10 us. */
void sendTrig(){
    LATBbits.LATB5 = 1;
    T1CONbits.TON = 1;
}

/* Busy-wait delay calibrated for a 16 MHz instruction clock. */
void delay_ms(unsigned int ms){
    while(ms-- > 0){
        asm("repeat #15999");
        asm("nop");
    }
}

int main(void) {
    setup();
    while(1){
        sendTrig();
        delay_ms(2000); //delay for 2 seconds before sending another trig signal.
    }
}
2024.05.14 01:19 Hardyskater26 Issue importing in popular NYC airbnb Dataset into SQL table
2024.05.13 16:15 Cool-Chocolate3193 Intersecting driving routes
2024.05.12 08:48 rbaleksandar Unable to fix invalid value for glBufferSubData() call in OpenGL (Python)
import pygame
from pathlib import Path
from pygame.locals import *
# BUG FIX: the paste dropped the module names ("from import *"); reconstructed
# from the GL calls below and the "OpenGL.GL" fragments in the post body.
from OpenGL.GL import *
from OpenGL.GL import shaders
import numpy as np
import pywavefront
from math import sin, cos, tan, atan2


def calcFrustumScale(fFovDeg):
    """Frustum scale factor for a vertical field of view given in degrees."""
    degToRad = np.pi * 2.0 / 360.0
    fFovRad = fFovDeg * degToRad
    return 1.0 / tan(fFovRad / 2.0)


def calcLerpFactor(fElapsedTime, fLoopDuration):
    """Triangle-wave interpolation factor in [0, 1] over a loop of fLoopDuration."""
    fValue = (fElapsedTime % fLoopDuration) / fLoopDuration
    if fValue > 0.5:
        fValue = 1.0 - fValue
    return fValue * 2.0


def computeAngleRad(fElapsedTime, fLoopDuration):
    """Angle in radians that completes a full turn every fLoopDuration seconds."""
    fScale = np.pi * 2.0 / fLoopDuration
    fCurrTimeThroughLoop = fElapsedTime % fLoopDuration
    return fCurrTimeThroughLoop * fScale


def load_model(single_model_path: Path,
               color: np.array = np.array([*np.random.uniform(0.0, 1.0, 3), 1.0], dtype='float32')):
    """Load a Wavefront OBJ and return {'vertex', 'face', 'color'} arrays.

    Every vertex gets the same RGBA color (random by default).
    """
    scene = pywavefront.Wavefront(single_model_path, collect_faces=True)
    model = {
        'vertex': np.array(scene.vertices, dtype='float32'),
        'face': np.array(scene.mesh_list[0].faces, dtype='uint32')
    }
    model['color'] = np.full((len(model['vertex']), 4), color, dtype='float32')
    return model


def get_size(model: dict, stype: str = 'vertex'):
    """Byte size of one model component ('vertex', 'face' or 'color')."""
    return model[stype].nbytes


def get_transform(elapsed_time):
    """Model-to-camera matrix: rotation about Y plus a fixed -5 Z offset (row-major)."""
    angle_rad = computeAngleRad(elapsed_time, 2.0)
    _cos = cos(angle_rad)
    _sin = sin(angle_rad)
    transform = np.identity(4, dtype='float32')
    transform[0][0] = _cos
    transform[2][0] = _sin
    transform[0][2] = -_sin
    transform[2][2] = _cos
    # offset
    transform[0][3] = 0.0  # -5.0
    transform[1][3] = 0.0  # 5.0
    transform[2][3] = -5
    return transform


# =======================================================
color_rnd = np.array([*np.random.uniform(0.0, 1.0, 3), 1.0], dtype='float32')
print(color_rnd)

modelToCameraMatrixUnif = None
cameraToClipMatrixUnif = None

# Global display variables
cameraToClipMatrix = np.zeros((4, 4), dtype='float32')
fFrustumScale = calcFrustumScale(45.0)

model = load_model('sample0.obj')
update_enabled = False
print('Model vertex bytesize:\t', get_size(model, 'vertex'))
print('Model face bytesize:  \t', get_size(model, 'face'))
print('Model color bytesize: \t', get_size(model, 'color'))

# Each GL buffer holds CUBES_COUNT equally-sized slots; *_SUB_BUFFER_SIZE is
# one slot (one model's worth of bytes).
CUBES_COUNT = 10
VBO_BUFFER_SIZE = CUBES_COUNT * get_size(model, 'vertex')
VBO_SUB_BUFFER_SIZE = get_size(model, 'vertex')
print('VBO total bytesize:', VBO_BUFFER_SIZE)
IBO_BUFFER_SIZE = CUBES_COUNT * get_size(model, 'face')
IBO_SUB_BUFFER_SIZE = get_size(model, 'face')
print('IBO total bytesize:', IBO_BUFFER_SIZE)
CBO_BUFFER_SIZE = CUBES_COUNT * get_size(model, 'color')
CBO_SUB_BUFFER_SIZE = get_size(model, 'color')
print('CBO total bytesize:', CBO_BUFFER_SIZE)

UPDATE_INTERVAL = 10  # Time interval between updates (in frames)

vertex_shader = '''
#version 330

layout(location = 0) in vec4 position;
layout(location = 1) in vec4 color;

smooth out vec4 theColor;

uniform mat4 cameraToClipMatrix;
uniform mat4 modelToCameraMatrix;

void main()
{
    vec4 cameraPos = modelToCameraMatrix * position;
    gl_Position = cameraToClipMatrix * cameraPos;
    theColor = color;
}
'''

fragment_shader = '''
#version 330

smooth in vec4 theColor;
out vec4 outputColor;

void main()
{
    outputColor = theColor;
}
'''

vbo = None
cbo = None
ibo = None
vao = None
program = None


def initialize():
    """Create the window, shaders, buffers and VAO."""
    global model
    global vbo, cbo, ibo, vao
    global program
    global modelToCameraMatrixUnif, cameraToClipMatrixUnif, cameraToClipMatrix

    pygame.init()
    display = (800, 800)
    # BUG FIX: '|' lost in the paste ("DOUBLEBUF OPENGL")
    pygame.display.set_mode(display, DOUBLEBUF | OPENGL)

    vertex_shader_id = shaders.compileShader(vertex_shader, GL_VERTEX_SHADER)
    fragment_shader_id = shaders.compileShader(fragment_shader, GL_FRAGMENT_SHADER)
    program = shaders.compileProgram(vertex_shader_id, fragment_shader_id)
    glUseProgram(program)

    glEnable(GL_CULL_FACE)
    glCullFace(GL_BACK)
    glFrontFace(GL_CW)
    glEnable(GL_DEPTH_TEST)
    glDepthMask(GL_TRUE)
    glDepthFunc(GL_LEQUAL)
    glDepthRange(0.0, 1.0)

    modelToCameraMatrixUnif = glGetUniformLocation(program, "modelToCameraMatrix")
    cameraToClipMatrixUnif = glGetUniformLocation(program, "cameraToClipMatrix")

    fzNear = 1.0
    fzFar = 100.0
    # Note that this and the transformation matrix below are both
    # ROW-MAJOR ordered. Thus, it is necessary to pass a transpose
    # of the matrix to the glUniform assignment function.
    cameraToClipMatrix[0][0] = fFrustumScale
    cameraToClipMatrix[1][1] = fFrustumScale
    cameraToClipMatrix[2][2] = (fzFar + fzNear) / (fzNear - fzFar)
    cameraToClipMatrix[2][3] = -1.0
    cameraToClipMatrix[3][2] = (2 * fzFar * fzNear) / (fzNear - fzFar)
    glUseProgram(program)
    glUniformMatrix4fv(cameraToClipMatrixUnif, 1, GL_FALSE, cameraToClipMatrix.transpose())
    glUseProgram(0)

    # BUG FIX (the reported GL_INVALID_VALUE): glBufferData(target, data, usage)
    # sizes the buffer store to len(data) — i.e. ONE model (96/144/128 bytes) —
    # yet update_vbo/cbo/ibo later write at offsets up to (CUBES_COUNT-1) slots
    # (e.g. offset 768, size 96).  glBufferSubData raises GL_INVALID_VALUE when
    # offset + size exceeds the store.  Allocate the full CUBES_COUNT-slot store
    # up front (data=None) and upload the initial model into slot 0.
    vbo = glGenBuffers(1)
    glBindBuffer(GL_ARRAY_BUFFER, vbo)
    glBufferData(GL_ARRAY_BUFFER, VBO_BUFFER_SIZE, None, GL_STATIC_DRAW)
    glBufferSubData(GL_ARRAY_BUFFER, 0, VBO_SUB_BUFFER_SIZE, model['vertex'].flatten())
    glBindBuffer(GL_ARRAY_BUFFER, 0)

    cbo = glGenBuffers(1)
    glBindBuffer(GL_ARRAY_BUFFER, cbo)
    glBufferData(GL_ARRAY_BUFFER, CBO_BUFFER_SIZE, None, GL_STATIC_DRAW)
    glBufferSubData(GL_ARRAY_BUFFER, 0, CBO_SUB_BUFFER_SIZE, model['color'].flatten())
    glBindBuffer(GL_ARRAY_BUFFER, 0)

    ibo = glGenBuffers(1)
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo)
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, IBO_BUFFER_SIZE, None, GL_STATIC_DRAW)
    glBufferSubData(GL_ELEMENT_ARRAY_BUFFER, 0, IBO_SUB_BUFFER_SIZE, model['face'].flatten())
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0)

    vao = glGenVertexArrays(1)
    glBindVertexArray(vao)

    vertex_dim = model['vertex'].shape[1]
    glBindBuffer(GL_ARRAY_BUFFER, vbo)
    glEnableVertexAttribArray(0)
    glVertexAttribPointer(0, vertex_dim, GL_FLOAT, GL_FALSE, 0, None)

    color_dim = model['color'].shape[1]
    glBindBuffer(GL_ARRAY_BUFFER, cbo)
    glEnableVertexAttribArray(1)
    glVertexAttribPointer(1, color_dim, GL_FLOAT, GL_FALSE, 0, None)

    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo)
    glBindVertexArray(0)


def update_vbo(offset_prev, offset_curr):
    """Zero the previous vertex slot and upload the model into the new one."""
    global vbo
    global model
    print('(VBO) Removing data at ({}:{})'.format(offset_prev, offset_prev + VBO_SUB_BUFFER_SIZE))
    print('(VBO) Adding data at ({}:{})'.format(offset_curr, offset_curr + VBO_SUB_BUFFER_SIZE))
    glBindBuffer(GL_ARRAY_BUFFER, vbo)
    # BUG FIX: glBufferSubData cannot "remove" data and passing data=None is
    # invalid — clearing a slot means overwriting it, here with zero bytes.
    glBufferSubData(GL_ARRAY_BUFFER, offset_prev, VBO_SUB_BUFFER_SIZE,
                    np.zeros(VBO_SUB_BUFFER_SIZE, dtype='uint8'))
    glBufferSubData(GL_ARRAY_BUFFER, offset_curr, VBO_SUB_BUFFER_SIZE, model['vertex'].flatten())
    glBindBuffer(GL_ARRAY_BUFFER, 0)


def update_cbo(offset_prev, offset_curr,
               color: np.array = np.array([*np.random.uniform(0.0, 1.0, 3), 1.0], dtype='float32')):
    """Zero the previous color slot and upload a new per-vertex color block."""
    global cbo
    global model
    model['color'] = np.full((len(model['vertex']), 4), color, dtype='float32')
    print('(CBO) Removing data at ({}:{})'.format(offset_prev, offset_prev + CBO_SUB_BUFFER_SIZE))
    print('(CBO) Adding data at ({}:{})'.format(offset_curr, offset_curr + CBO_SUB_BUFFER_SIZE))
    glBindBuffer(GL_ARRAY_BUFFER, cbo)
    glBufferSubData(GL_ARRAY_BUFFER, offset_prev, CBO_SUB_BUFFER_SIZE,
                    np.zeros(CBO_SUB_BUFFER_SIZE, dtype='uint8'))
    glBufferSubData(GL_ARRAY_BUFFER, offset_curr, CBO_SUB_BUFFER_SIZE, model['color'].flatten())
    glBindBuffer(GL_ARRAY_BUFFER, 0)


def update_ibo(offset_prev, offset_curr):
    """Zero the previous index slot and upload the model's faces into the new one."""
    global ibo
    global model
    print('(IBO) Removing data at ({}:{})'.format(offset_prev, offset_prev + IBO_SUB_BUFFER_SIZE))
    print('(IBO) Adding data at ({}:{})'.format(offset_curr, offset_curr + IBO_SUB_BUFFER_SIZE))
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo)
    glBufferSubData(GL_ELEMENT_ARRAY_BUFFER, offset_prev, IBO_SUB_BUFFER_SIZE,
                    np.zeros(IBO_SUB_BUFFER_SIZE, dtype='uint8'))
    glBufferSubData(GL_ELEMENT_ARRAY_BUFFER, offset_curr, IBO_SUB_BUFFER_SIZE, model['face'].flatten())
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0)


def render():
    """Draw one frame: clear, rotate the model, issue the indexed draw."""
    global vao, model
    global modelToCameraMatrixUnif
    glClearColor(0.0, 0.0, 0.0, 0.0)
    glClearDepth(1.0)
    # BUG FIX: '|' lost in the paste
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)

    glUseProgram(program)
    elapsed_time = pygame.time.get_ticks() / 1000.0
    transformMatrix = get_transform(elapsed_time=elapsed_time)
    glUniformMatrix4fv(modelToCameraMatrixUnif, 1, GL_FALSE, transformMatrix.transpose())

    glBindVertexArray(vao)
    index_count = model['face'].size
    glDrawElements(GL_TRIANGLES, index_count, GL_UNSIGNED_INT, None)
    glBindVertexArray(0)

    pygame.display.flip()


def main():
    """Event loop; pressing 'u' moves the model's data to buffer slot 3."""
    global update_enabled
    initialize()
    frame_count = 0
    offsets = {
        'vbo': {'prev': 0, 'curr': 0},
        'cbo': {'prev': 0, 'curr': 0},
        'ibo': {'prev': 0, 'curr': 0}
    }
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    pygame.quit()
                    quit()
                if event.key == pygame.K_u:
                    update_enabled = not update_enabled
                    if update_enabled:
                        print('Update triggered')

        if update_enabled:  # and frame_count % UPDATE_INTERVAL == 0:
            idx = 3
            # BUG FIX: stray ')' in the paste after VBO_SUB_BUFFER_SIZE
            offsets['vbo']['curr'] = idx * VBO_SUB_BUFFER_SIZE
            update_vbo(offsets['vbo']['prev'], offsets['vbo']['curr'])
            offsets['vbo']['prev'] = offsets['vbo']['curr']

            offsets['cbo']['curr'] = idx * CBO_SUB_BUFFER_SIZE
            color = np.array([*np.random.uniform(0.0, 1.0, 3), 1.0], dtype='float32')
            update_cbo(offsets['cbo']['prev'], offsets['cbo']['curr'], color)
            offsets['cbo']['prev'] = offsets['cbo']['curr']

            offsets['ibo']['curr'] = idx * IBO_SUB_BUFFER_SIZE
            update_ibo(offsets['ibo']['prev'], offsets['ibo']['curr'])
            offsets['ibo']['prev'] = offsets['ibo']['curr']

            update_enabled = False
        render()
        frame_count += 1


if __name__ == '__main__':
    main()

# (post text) According to the PyOpenGL documentation my call of glBufferSubData()
# should be correct. My print statements give me:
Model vertex bytesize: 96 Model face bytesize: 144 Model color bytesize: 128 VBO total bytesize: 960 IBO total bytesize: 1440 CBO total bytesize: 1280 — which is correct. After all my cube has 8 vertices, each with 3 float32 components (4 bytes apiece), so 8*3*4 = 96.
raise self._errorClass( OpenGL.error.GLError: GLError( err = 1281, description = b'invalid value', baseOperation = glBufferSubData, pyArgs = ( GL_ARRAY_BUFFER, 768, 96, array([ 1., 1., -1., 1., -1., -1., 1., 1., 1., 1., -1., 1., -1., 1., -1., -1., -1., -1., -1., 1., 1...., ), cArgs = ( GL_ARRAY_BUFFER, 768, 96, array([ 1., 1., -1., 1., -1., -1., 1., 1., 1., 1., -1., 1., -1., 1., -1., -1., -1., -1., -1., 1., 1...., ), cArguments = ( GL_ARRAY_BUFFER, 768, 96, array([ 1., 1., -1., 1., -1., -1., 1., 1., 1., 1., -1., 1., -1., 1., -1., -1., -1., -1., -1., 1., 1...., ) )The invalid value error is related to incorrect offset or offset + size. I cannot see any issue with the data portion of the call since the that is the same data I use to fill my buffer at the beginning and it renders perfectly fine. Perhaps my logic for calculating the offsets for the colors and indices is incorrect but at least the VBO should work. Each sub-data chunk (representing the respective components - vertices, colors or indices) can be accessed by a given IDX multiplied by the respective sub-data byte size. This allows me to access all three types of data for the same object I want to render. If the IDX is anything but 0, my code crashes with the above mentioned error.
2024.05.12 05:17 Ihruoan My former Soldier lost his CAB paperwork and it never made it into iPerms before the IPPS-A transition
2024.05.12 02:32 Atoraxic "Gang Stalking: Real-Life Harassment or Textbook Paranoia". Idiocy or Calculated Disinformation
Gang Stalking: Real-Life Harassment or Textbook Paranoia? At first it seems like a legitimate question, but in reality the author has created a false dilemma — a manipulation technique that presents us with a false premise right off the bat.
Discovery or the following materials and methods: that will promote illogical thinking and impulsiveness to the point where the receiver would be discredited in public, increase the frequency of mentaion and perception, prevent or counteract the effects of alcohol, promote signs and symptoms of recognized diseases in a reversible way so they can be used for malingering and produce physical disablement like paralysis.p9 Lineville 4-26-2016 Project MKULTRA and the Search for Mind Control: Clandestine Use of LSD Within the CIA Tani M. Linville Cedarville University, tanilinville@cedarville.edu
Reports of “gang stalking" (a.k.a. "gang-stalking" or "gangstalking") began emerging at least 15 years ago..Poisoner in Chief: Sidney Gottlieb and the CIA Search for Mind Control is a 2019 book by The New York Times journalist and historian[1] Stephen Kinzer. I repeatedly details how when eventually questioned by the US Congress and others Sydney Gottlieb and other conspirators stated they did such heinous disturbing acts because they were extremely fearful the Russians and Chinese would master "mind control" first, thus winning the Cold War. They used fear to justify torturing, completely and permanently devastating and destroying the minds and lives of innocent victims while also murdering, after torture, many enemy captives.
by self-described “targeted individuals” (“T.I.s”) claiming to be followed, surveilled, harassed, and otherwise victimized by unknown forces wielding high-tech weapons of “mind control.” Since then, much more has been written about this phenomenon, especially over the past few years, with national attention devoted to a few notable cases of violence and mass shootings perpetrated by people identifying as T.I.s.1What we see here is the introduction to his disinformation. It identifies the targets to be discredited. Targeted Individuals, high tech weapons of mind control, notable cases of violence and mass shootings perpetrated by people identifying as T.I.s, as well as the growing number mainstream and highly respected journalism pieces that have come out exposing it while giving sympathy and empathy to its victims.
By way of summary, T.I.s typically describe living in a state of constant fear, seeing evidence of being followed by unmarked police cars in every black SUV that drives by, of being zapped by “extremely low-frequency” (ELF) radiation or “Voice to Skull” (V2K) technology in every tingling sensation or bodily ache, and of malevolent intentions in other people’s every gesture.The false dilemma offers harassment or delusion as explanations.
2.2. Delusions as a shear pin According to the “shear-pin” account developed by McKay and Dennett (2009), some false beliefs that help manage negative emotions and avoid low self-esteem and depression can count as psychologically adaptive. McKay and Dennett suggest that, in situations of extreme stress, motivational influences are allowed to intervene in the process of belief evaluation, causing a breakage. Although the breakage is bad news epistemically, as the result is that people come to believe what they desire to be true and not what they have evidence for, it is not an evolutionary “mistake”, rather it is designed to avoid breakages that would have worse consequences for the person’s self-esteem and wellbeing. What might count as a doxastic analogue of shear pin breakage? We envision doxastic shear pins as components of belief evaluation machinery that are “designed” to break in situations of extreme psychological stress (analogous to the mechanical overload that breaks a shear pin or the power surge that blows a fuse). Perhaps the normal function (both normatively and statistically construed) of such components would be to constrain the influence of motivational processes on belief formation. Breakage of such components, therefore, might permit the formation and maintenance of comforting misbeliefs – beliefs that would ordinarily be rejected as ungrounded, but that would facilitate the negotiation of overwhelming circumstances (perhaps by enabling the management of powerful negative emotions) and that would thus be adaptive in such extraordinary circumstances. 📷The epistemic innocence of motivated delusions Delusions are defined as irrational beliefs that compromise good functioning. However, in the empirical literature, delusions have been found to have …📷 www.sciencedirect.comClick to expand...
and of malevolent intentions in other people’s every gesture.This symptom comes from the prolonged trauma. This reports on findings for victims of trauma after the fact. Because sustained severe trauma , that is continuously forced for years, hasn't been studied by any ethical scientist we have to settle for these humane results.*
For example, the “who” is variably attributed to neighbors, ex-boyfriends, employers, police, and other law enforcement agencies, “the financial elite,” or less conventional sources, like Freemasons and space aliens. The “why” is often attributed to retaliation for ending relationships, acting as whistleblowers at work, political activism, having run-ins with the law, or being privy to secret information.These are all attempts to begin to resolve the trauma. The interface also promotes the need to resolve the trauma by repeatedly triggering the victim with statements like "I can't believed you don't know who is doing this to you", "How is this even possible" and "why is this being done to you." The interface mixes in delusional suggestions to answer these questions. The real reason this is being done to them is they fit a profile that fulfills a part of a sample population created to reflect society as a whole and thats it. There are a few exceptions, but thats very rare.
Seemingly motiveless harassment is chalked up to being hapless victims of experimentation by government agencies testing new techniques of surveillance or mind control.This is what it is, it may be private sector but its roots are in the government agencies thats work is kept secret and its motivation is the remote thought reform, brainwashing and control of individual victims.
If there’s a common thread to the accounts of gang stalking, it’s that T.I.s describe considerable suffering not only as a result of ongoing concerns about being harassed, but also from the experience of physical symptoms, like pain and “hearing voices,” and the significant social stigma associated with sharing their claims with family, friends, or mental health professionals who routinely dismiss them as “crazy.” As a result, T.I.s have found solace on the internet, where they share "war stories" and survival strategies with like-minded individuals who have similarly found themselves at the center of a vast conspiracy theory.This is all true for once. People facing a common enemy or struggle often congregate together as in the 12 step groups. When you look at the devious nature of what we face its easy to see why its difficult to share with some people with out them being totally fooled by this program as thats what its designed to do.
And yes, a few real events in history, such as the CIA’s MK-Ultra “mind control” program and the FBI’s COINTELPRO surveillance program of the 1950s, have occurred, just as the modern-day mass manipulation of human behavior through social media is a reality in which we all now live. This is the rebooted MK program, if it ever went away at all. That's a fact.
But if you aren’t personally experiencing gang stalking, it’s hard for an outsider, much less a psychiatrist, to accept it as anything other than a textbook example of paranoia. Indeed, that’s been the conclusion of the few mental health researchers that have examined gang stalking to date. In 2006, Dr. Vaughn Bell and colleagues published an analysis of 10 online accounts of “mind control experiences” consistent with gang stalking (though they didn’t mention that word explicitly).2 When assessed by three independent psychiatrists, all of the accounts were classified as consistent with the evidence of a psychotic disorder. You have to stop trying to use the psychotic effects of severe trauma to discount that the trauma is happening.
In 2015, Drs. Lorraine Sheridan and David James conducted an analysis of 128 responses to a survey about stalking that similarly concluded that 100 percent of cases involving gang stalking by multiple coordinated individuals reflected paranoid delusions (in contrast, only 4 percent of those reporting stalking by a single individual were deemed to be delusional).3 In both of these studies, gang stalking claims were attributed to paranoia because they defied credulity, often due to the sheer amount of resources or level of coordinated organization that would be necessary to carry out what was claimed. The need for sustained unexplainable trauma resolution creates a dynamic for delusion and easily accepted suggested delusion.
As a psychiatrist, it’s nearly impossible to disagree with those conclusions. Delusions are defined in psychiatry as “fixed, false beliefs,” with paranoia representing a classic version in which one believes they’re being followed, harassed, or otherwise persecuted. Vigilance—keeping an eye out for and being generally wary of potential threats—is normal and can transform into exaggerated hypervigilance under various conditions, such as having been an actual victim of violence. At the extreme, full-blown paranoia of delusional intensity can be understood as that same evolutionary warning system gone completely awry, to the point of seeing the evidence and believing that such threats are almost everywhere.This was already explained, but to recap never-ending unresolvable trauma causes paranoia, hyper vigilance and leaves victims very vulnerable to delusion.
But digging deeper tells a different story. Many T.I.s report concerns not only about gang stalking but other common symptoms of mental illness, such as auditory hallucinations or voice-hearing and even less plausible beliefs, such as having “implants” inside their bodies that can control their thoughts or that people have been replaced by aliens. But even those with “pure” paranoia appear to display textbook examples of delusional thinking.already explained. Victims have tried to explain how the tech works and years back implants could have been part of the solution, but now of course with the advent of technology in the civilian world victims recognize that no implant is needed to track someone.
First, there’s the unbelievably vast extent of what’s claimed… fleets of black SUVs with tinted windows, persecutors in disguise on every street corner, and futuristic secret technology being deployed from God knows where. Second, there’s a lack of any obvious or credible motive for the persecution…Here he is giving a dose of the discrediting. The SUV are paranoia from the trauma. The tech is real.. and lol I don't think god has figured out where its coming from yet. It comes from speakers and an infrasonic microphone array and sound source triangulation software should solve this one finally.
futuristic secret technology being deployed from God knows where. Nothing unbelievable about "futuristic secret technology". It comes from infrasonic speakers or generators, and an infrasonic microphone array should easily triangulate the location of the sources.
… why would the CIA be devoting considerable resources to keep an “average Joe” under constant surveillance for years on end (note that paranoia and grandiosity—an exaggerated sense of self-importance—often go hand-in-hand)?The surveillance is automated and done by a computer. No person is watching it all day. Certain thoughts and behaviors are tagged and logged for analysis if desired. This is clear as times you can do a certain behavior or think a certain thought and Alice will always make the same specific comment, indicating a grouping by the system.. you can sit there and just play with it entering in different thoughts and pay attention to its response. Its response is often a simple abusive statement the always conforms to the input groups.
Third, the persecutory experiences continue regardless of attempts to escape or relocate. Escapable trauma wouldn't seem so hopeless nor tend to cause helplessness. They do have to relocate equipment when a victim moves or flees. So often times the torment is significantly reduced right after a move. They say this is a reward for moving, but it's just the time it takes them to get equipment to the new location. It took them almost 6 weeks when I last jumped states. Well, it is government work.
2024.05.11 22:05 Hel-Low5302 Really high Total Negative Slack
// Decimation-enable generator + scalar Kalman filter update, enabled once
// every 1024 clock cycles (decimation factor 1024).
//
// NOTE(review): the original used `(counter % 1024) == 0`. For an unsigned
// counter, modulo by a power of two reduces to testing the low 10 bits, which
// avoids inferring modulo logic. Assumes `counter` is unsigned and at least
// 10 bits wide — confirm its declaration.
always @ (posedge clk) begin
    // counter counts all clock cycles starting from 0, it doesn't reset
    if (counter[9:0] == 10'd0) begin
        // Registered: asserts one cycle AFTER counter reaches a multiple of 1024.
        counter_eq_1024 <= 1'b1;
    end else begin
        counter_eq_1024 <= 1'b0;
    end
end

// Kalman update. Blocking assignments (=) are intentional here: each line
// consumes the value computed by the previous line within the same enabled
// cycle, so the whole update is one long combinational chain (multiplies
// plus a divide). That chain is the likely source of the large Total
// Negative Slack reported for this design.
//
// NOTE(review): `(* multicycle = "1024" *)` is not a standard synthesis
// attribute and is silently ignored by mainstream tools. To relax timing on
// this path, declare a multicycle path in the timing constraints instead,
// e.g. Vivado XDC: set_multicycle_path -from/-to the registers gated by
// counter_eq_1024 (the enable is high only 1 cycle in 1024, so up to ~1023
// cycles are legitimately available). Alternatively, pipeline the divide.
always@(posedge clk) begin
    (* multicycle = "1024" *)   // kept for documentation; see note above
    if (counter_eq_1024) begin  // decimation factor is 1024
        y = y_measured;                       // sample the measurement
        u = -omega*T_s*k_omega*y;             // control input
        // Kalman gain; the divider below dominates the critical path.
        K_next = (phi*phi*e_pre_var + d_var)/(phi*phi*e_pre_var + d_var + s_var);
        x_curr = (1-K_next)*x_next + K_next*y; // estimate the current state
        x_next = phi*x_curr + u;               // predict the next state
        // Updated error variance (Joseph-like form).
        e_next_var = (1-K_next)*(1-K_next)*(phi*phi*e_pre_var + d_var) + K_next*K_next*s_var;
    end
end
2024.05.11 13:31 devoid0101 New Kp-like planetary geomagnetic activity indices: Hourly (Hp60) and half-hourly (Hp30) indices
It looks like we touched upon KP11, a G6 storm (if such a thing existed). Serious solarmax spaceweather submitted by devoid0101 to Heliobiology [link] [comments] “The geomagnetic Hpo index is a Kp-like index with a time resolution of half an hour, called Hp30, and one hour, called Hp60. besides that, the Hpo index is not capped at 9 like Kp, but is an open ended index that describes the strongest geomagnetic storms more nuanced than the three-hourly Kp, which is limited to the maximum value of 9. Next to the Hpo we also provide the linear apo index (ap30 and ap60). The Hpo index was developed in the H2020 project SWAMI and is described in Yamazaki et al (2022). Abstract The geomagnetic activity index Kp is widely used but is restricted by low time resolution (3-hourly) and an upper limit. To address this, new geomagnetic activity indices, Hpo, are introduced. Similar to Kp, Hpo expresses the level of planetary geomagnetic activity in units of thirds (0o, 0+, 1−, 1o, 1+, 2−, …) based on the magnitude of geomagnetic disturbances observed at subauroral observatories. Hpo has a higher time resolution than Kp. 30-min (Hp30) and 60-min (Hp60) indices are produced. The frequency distribution of Hpo is designed to be similar to that of Kp so that Hpo may be used as a higher time-resolution alternative to Kp. Unlike Kp, which is capped at 9o, Hpo is an open-ended index and thus can characterize severe geomagnetic storms more accurately. Hp30, Hp60 and corresponding linearly scaled ap30 and ap60 are available, in near real time, at the GFZ website (https://www.gfz-potsdam.de/en/hpo-index). Key Points New Kp-like planetary geomagnetic activity indices, Hpo, are presented Hourly (Hp60) and half-hourly (Hp30) indices are available from GFZ website Hpo indices are open-ended without the upper limit at 9o Plain Language Summary The geomagnetic activity index Kp is a measure of planetary geomagnetic activity, expressed in units of thirds (0o, 0+, 1−, 1o, 1+, 2−, …9o). 
Kp is widely used in the space physics community, as it is known to be a good proxy of the solar-wind energy input into the magnetosphere-ionosphere-thermosphere system. Kp has two important limitations. One is the temporal resolution. Kp is a three-hourly index, so that temporal features within 3 hr are not resolved. The other is the upper limit of the index. Kp does not exceed a maximum value of 9o, so that under extremely disturbed conditions, geomagnetic activity is not accurately represented. We introduce a group of new geomagnetic activity indices Hpo that overcomes these limitations. Hpo is designed to represent planetary geomagnetic activity in a similar way as Kp but with higher temporal resolution and without the upper limit at 9o. This paper describes the production of 30-min (Hp30) and 60-min (Hp60) indices, and demonstrates their properties in comparison with Kp. Hpo indices since 1995, including near-real-time values, are distributed through the GFZ website (https://www.gfz-potsdam.de/en/hpo-index). 1 Introduction Variations in the solar wind cause changes in electric currents that flow in the magnetosphere and ionosphere. The associated changes in the magnetic field can be observed using magnetometers on the ground. There exist various types of geomagnetic indices to monitor the intensity of geomagnetic disturbance associated with solar wind variations (Mayaud, 1980). The Kp index is one of the most widely used indices of geomagnetic activity. The derivation, application and historical background of Kp are detailed in Matzka, Stolle, et al. (2021), and thus are described here only briefly. Kp is derived from K indices (Bartels et al., 1939) evaluated at 13 subauroral observatories from both northern and southern hemispheres. A K index expresses geomagnetic activity on a scale of 0–9 at each observatory for a given 3-hourly interval of the UT day (00–03, 03–06, …, 21–24 UT). 
It is based on the range of geomagnetic disturbance over the 3-hourly interval, which may contain geomagnetic pulsations (McPherron, 2005; Saito, 1969), bays associated with substorms (McPherron, 1970; Lyons, 1996), sudden storm commencements and sudden impulses (Araki, 1994), geomagnetic storm main phase (Gonzalez et al., 1994) and solar-flare and eclipse effects (Yamazaki & Maute, 2017). K is designed to have a similar frequency distribution regardless of observatory, and thus it does not depend on latitude. K indices are converted to standardized Ks indices, which take into account the influence of seasonal and UT biases. Kp is the average of the 13 Ks indices expressed in units of thirds (0o, 0+, 1−, 1o, 1+, 2−, …, 9o), thus it represents planetary, rather than local, geomagnetic activity. The complete time series of the definitive Kp index since 1932 and nowcast indices for the most recent hours are available from the Kp website at Deutsches GeoForschungsZentrum GFZ (https://www.gfz-potsdam.de/en/kp-index/) with a digital object identifier (DOI; Matzka, Bronkalla, et al., 2021). Real-time Kp forecasts (Shprits et al., 2019) based on solar wind data are also available from the GFZ website (https://spaceweather.gfz-potsdam.de/products-data/forecasts/kp-index-forecast). The Kp index has a wide range of applications in space physics studies. For example, Kp can be used to select undisturbed data from the measurements obtained from the magnetosphere, ionosphere or thermosphere to determine their climatological base states (e.g., Drob et al., 2015; Fejer et al., 2008). Kp is also often used for modeling the geospace response to solar wind variations. 
Just to give a few examples, Kp is used to drive the 3-D Versatile Electron Radiation Belt model (Subbotin et al., 2011), the Whole Atmosphere Community Climate Model with thermosphere and ionosphere extension (WACCM-X; Liu et al., 2018) and the Naval Research Laboratory Mass Spectrometer Incoherent Scatter radar empirical atmospheric model (Emmert et al., 2021), among many other models of the magnetosphere, ionosphere and thermosphere. Thomsen (2004) argued that what makes Kp so useful is its sensitivity to the latitudinal distance from the Kp stations to the equatorial edge of auroral currents, which is tightly linked to the strength of magnetospheric convection. Kp has two important limitations. One is the temporal resolution. Kp cannot resolve temporal features within 3 hr. For example, the onset of geomagnetic disturbance determined by Kp could be off from the actual onset by up to 3 hr. This could be an issue when Kp is used to drive a geospace model, because the state of the magnetosphere, ionosphere and thermosphere can change significantly within the 3-hr interval. As a compromise, some models use interpolated Kp values as input data, for example, thermospheric density models (Vallado & Finkleman, 2014), WACCM-X (Liu et al., 2018). The other limitation of Kp is its upper limit at 9o. Kp is not able to quantify geomagnetic activity after it reaches 9o. Extreme geomagnetic storms involving Kp = 9o are not necessarily equally strong in terms of geomagnetic disturbance. Extrapolated values of Kp above 9o are sometimes used for a better representation of geomagnetic activity during severe geomagnetic storms (e.g., Shprits et al., 2011). The objective of this paper is to introduce a new group of Kp-like geomagnetic indices. The indices are collectively called Hpo, where “H” stands for half-hourly or hourly, “p” for planetary, and “o” for open-ended. 
Hpo has been conceived and developed under the EU Horizon 2020 project, Space Weather Atmosphere Model and Indices (SWAMI; Jackson et al., 2020). Hpo is designed to represent planetary geomagnetic activity in a similar manner as Kp but with higher time resolution and without an upper limit, to overcome the limitations of Kp described above. The derivation of 30-min (Hp30) and 60-min (Hp60) indices is outlined in Section 2, and their basic properties are described in Section 7. 2 Derivation of Hpo Hpo indices are derived using 1-min magnetic data from the same 13 subauroral observatories as Kp (see Section 2.2 of Matzka, Stolle, et al., 2021). Time series of Hpo starts from the year 1995, because 1-min digital data are not available from all the observatories before 1995. The procedure for deriving Hpo is similar to that for nowcast Kp described in Matzka, Stolle, et al. (2021), involving the steps described below. 2.1 Evaluation and Removal of Quiet Curve Records of the geomagnetic field from a ground station contain regular quiet daily variation and geomagnetic disturbance (Chapman & Bartels, 1940). The estimation of the quiet curve for Hpo is based on the Finnish Meteorological Institute method (Sucksdorff et al., 1991), which uses 1-min data from the previous day, present day, and subsequent day. The quiet curve is obtained for the northward X and eastward Y components of the geomagnetic field, and subtracted from the corresponding data, which leaves geomagnetic disturbance. 2.2 Evaluation of the Magnitude of Geomagnetic Disturbance The magnitude of geomagnetic disturbance is evaluated for every 30-min interval for Hp30 and 60-min interval for Hp60. For a given time interval, the range of geomagnetic disturbance (i.e., maximum minus minimum value) is compared with the maximum absolute value of geomagnetic disturbance, and the larger value of the two is adopted as the magnitude of geomagnetic disturbance. 
This contrasts with the derivation procedure for Kp, which always uses the range of geomagnetic disturbance. We found that this modification of the procedure improves the compatibility between Hpo and Kp. The magnitude of geomagnetic disturbance is obtained for the X and Y components, and the larger value is used in the next step. 2.3 Evaluation of H30 and H60 Indices H30 and H60 indices are analogous to K indices for Kp, and are collectively called H herein. For the evaluation of K, an observatory-specific table is used for converting the magnitude of geomagnetic disturbance (in nT) to an integer K value (0–9). An example of the conversion table for the Niemegk observatory can be found in Table 1. New tables have been created for each observatory that convert the magnitude of geomagnetic disturbance to an H value (0–9). This was done, for each observatory, by generating a conversion table for H in such a manner that the frequency distribution of H is as similar as possible to the frequency distribution of K. The construction of the conversion tables for H is based on the geomagnetic data during 1995–2017, which were all the available data when the construction of Hpo was initiated. The conversion table for H30 and H60 for Niemegk is presented in Table 1. Furthermore, extended conversion tables are produced in order to allow H to go beyond 9. In the extended conversion tables, the maximum value of H is unlimited. The lower limit for H = 10 is given by the lower limit of H = 9 multiplied by a factor of 1.35. The lower limit of H = 11 is given by the lower limit of H = 10 multiplied by a factor of 1.30, and the lower limit of H = 12 is given by the lower limit of H = 11 multiplied by a factor of 1.20. For values of H greater than 12, the multiplication factor will be always 1.20, so that H can be defined no matter how large the magnitude of geomagnetic disturbance is. 
These multiplication factors were determined on a trial-and-error basis so that the behavior of the final Hpo index above 9o will be compatible with those of other open-ended indices (see Section 7). Table 1. Lower Limits of H30, H60, and K for the Niemegk Observatory Index 0 1 2 3 4 5 6 7 8 9 H30 (nT) 0 2.16 4.46 8.89 17.9 33.9 65.7 119 190 267 H60 (nT) 0 2.97 6.11 12.1 24.3 44.7 82.7 144 218 337 K (nT) 0 5.00 10.0 20.0 40.0 70.0 120 200 330 500 2.4 Evaluation of Hp30 and Hp60 Indices H indices are converted to standardized Hs indices using the same method for converting K to Ks. The conversion tables can be found in the Supporting Information of Matzka, Stolle, et al. (2021). The conversion of H to Hs minimizes the influence of seasonal and UT biases. Finally, the average of the 13 Hs indices is converted into Hpo values in units of thirds (0, 1/3, 2/3, 1, 4/3, 5/3, 2, …) in analog fashion as with the nowcast Kp (see Section 3.3 of Matzka, Stolle, et al., 2021) and expressed as (0o, 0+, 1−, 1o, 1+, 2−, …) following the convention for Kp. The Hpo value is derived using the H indices evaluated with the conversion tables capped at 9 (like the one shown in Table 1). If this initial Hpo value is 9o, all the H indices are re-evaluated using the extended conversion tables, in which H can go beyond 9, to re-calculate Hpo. This ensures that Hpo and Kp behave similarly up to 9− (and differently only at 9o and above). Like Kp, Hpo indices are a quasi-logarithmic, rather than linear, measure of geomagnetic activity, and thus are not suitable for basic arithmetic operations such as addition and multiplication. To avoid this issue, linearly scaled ap30 and ap60 indices (collectively called apo) are produced for Hp30 and Hp60, respectively, by using the table that is used for producing ap from Kp (Matzka, Stolle, et al., 2021) but extending its higher end in a similar manner as the extension of H tables above 9. The relationship between Hpo and apo is illustrated in Figure 1a. 
Like ap, values of apo correspond to half the magnitude of geomagnetic disturbance at Niemegk. Details are in the caption following the image Figure 1 Open in figure viewer PowerPoint (a) The relationship between Hpo and apo. (b–j) Frequency distributions of the occurrence of Kp, Hp60, and Hp30 values for different years. (k) Monthly mean values of ap, ap60, and ap30 during 1995–2020. The total sunspot number is also indicated. Hp30 and Hp60, along with their corresponding ap30 and ap60, are archived since 1995 and available, in near real time, from the GFZ website (https://www.gfz-potsdam.de/en/hpo-index) with DOI (https://doi.org/10.5880/Hpo.0002) under the CC BY 4.0 license (Matzka et al., 2022, for data publication). 3 Some Properties of Hpo The frequency distributions of the occurrence of Hp30, Hp60, and Kp values are compared in Figures 1b–1j for every 3-year interval from 1995 to 2021. The distribution pattern of Kp is different in different solar cycle phases. For instance, during the solar minimum years 2007–2009 (Figure 1f) and 2019–2021 (Figure 1j), the occurrence rate of low Kp values (e.g., Kp ≤ 1o) is appreciably higher than during the solar maximum years 2001–2003 (Figure 1d) and 2013–2015 (Figure 1h). Hp30 and Hp60 reproduce different distribution patterns of Kp well, even for the later years not used in the construction of the conversion tables defining the H indices. The agreement of Hp30 and Hp60 with Kp during 2018–2021 (Figure 1j) suggests that the conversion tables for H indices are valid beyond the period 1995–2017. The linearly scaled ap30 and ap60 indices are suitable for assessing average geomagnetic activity over a certain period. Monthly mean values of ap30 and ap60 are plotted in Figure 1k. They are in good agreement with monthly mean ap, showing 11-year solar-cycle variation. The sunspot number is also displayed in Figure 1k for comparison. 
Geomagnetic activity is known to be highest during the declining phase of solar cycle due to the effects of recurrent high speed solar wind streams (Lockwood et al., 1999). In Figure 2, Hp30 (top), Hp60 (middle), and Kp (bottom) are compared with other geospace indices. The left panels show comparisons with Newell's coupling function (Newell et al., 2007), which is a measure of the energy input from the solar wind into the magnetosphere. The coupling function was derived using OMNI 5-min solar wind data (King & Papitashvili, 2005). Panels in the middle and right columns show comparisons with AE and PC indices, respectively. The AE index is a measure of auroral electrojet activity based on geomagnetic field measurements in the auroral region. The PC index represents geomagnetic activity in the polar region (Troshichev et al., 1988). Following Stauning (2007), the average of the PC indices from the northern (PCN) and southern (PCS) hemispheres were calculated using non-negative values. For comparisons with Hpo and Kp indices, 5-min solar wind data and 1-min AE and PC indices were averaged over every 30-min intervals. Hp60 and Kp are assumed to remain the same within their temporal windows. The solar wind data were shifted by 20 min to account for the delay due to energy transfer from the bow shock to the ionosphere (Manoj et al., 2008). Details are in the caption following the image Figure 2 Open in figure viewer PowerPoint Dependence of (a–c) Hp30, (d–f) Hp60, and (g–i) Kp on (a, d, g) Newell's solar-wind coupling function, (b, e, h) AE index, and (c, f, i) PC index. For the PC index, the average of the northern (PCN) and southern (PCS) indices is used, considering only their positive values. 
In each panel, black dots indicate the average of the solar-wind coupling function, AE or PC index at each Hpo or Kp value (0o, 0+, 1−, …, 9−), with error bars representing the standard deviation and the green curve representing the best-fitting third-order polynomial function for Hpo or Kp below 9o. The gray dots in panels (a–f) are individual data points for Hpo ≥9o, and the yellow dot is their average value. The solar-wind coupling function, AE and PC indices averaged at each value of Hpo and Kp from 0o to 9− are indicated in Figure 2 by black dots, with error bars representing the standard deviation. Curves in green show the best-fitting third-degree polynomial function for Hpo and Kp below 9o. The fitted curves for Hp30, Hp60, and Kp are similar to each other. The results suggest that for Hpo <9o, the dependence of Hp30 and Hp60 on the solar-wind coupling function, AE and PC indices is consistent with that of Kp. For Hpo ≥9o, the number of data points is rather small, and thus the average solar-wind coupling function, AE and PC indices were not calculated for each Hpo value. Instead, a single average value was derived using all the data corresponding to Hpo ≥9o (gray dots), which is indicated by the yellow dot in each panel of Figures 2a–2f. It is seen that the average value falls near the polynomial curve derived from the data for Hpo <9o. The results suggest that Hp30 and Hp60 can represent geomagnetic activity for Hpo ≥9o in the manner expected from their behavior for Hpo <9o. The behavior of Hpo at its high end is further illustrated in Figure 3 based on five geomagnetic storm events. The selected geomagnetic storms are those in November 2003, March 2001, October 2003, November 2004, and July 2002, which are the five most intense geomagnetic storms during the period considered in this study (1995–2021) according to the minimum value of the Dst index. 
The left panels show time series of Hp30, Hp60, and Kp, as well as the Dst index, over a 7-day interval, in which the third day corresponds to the storm main phase. The temporal evolution of Kp is generally well captured by Hp30 and Hp60. Variations within 3 hr are seen in Hp30 and Hp60, which are not resolved by Kp. The maximum values of Hp30, Hp60, Kp, and the minimum value of Dst are (9−, 9−, 9−, and −422) for the November 2003 event, (10o, 10−, 9−, and −387) for the March 2001 event, (12−, 12−, 9o, and −383) for the October 2003 event, (11−, 9−, 9−, and −374) for the November 2004 event and (11o, 11o, 9o, and −300) for the July 2002 event. Thus, according to Hpo, the October 2003 event is the strongest among the five. Hpo ≥9o is seen mainly during the storm main phase, when the Dst index rapidly decreases. The right panels compare the 3-hourly mean of Hp30 (calculated from ap30) and Kp. The correlation is rather good; the correlation coefficient r is greater than 0.98 in all cases. Similarly good correlation is found for the comparison between 3-hourly mean of Hp60 and Kp (not shown here). These results suggest that Hpo can represent geomagnetic activity in a similar way as Kp even during the strongest geomagnetic storms. Details are in the caption following the image Figure 3 Open in figure viewer PowerPoint (a, c, e, g, i) Time series of Kp, Hp60, Hp30, and Dst over a 7-day interval during strong geomagnetic storm events. Zero in the horizontal axis corresponds to 00 UT of the day with the storm main phase. (b, d, f, h, j) Comparison of Kp and three-hourly average of Hp30 during the strong geomagnetic storm events. The 3-hourly average of Hp30 is derived from the corresponding values of ap30. The correlation coefficient r is also indicated. To provide some insight into variations of Hp30 and Hp60 within 3 hr, Figure 4 depicts the response of ap30, ap60, and ap to isolated substorms. 
The substorm onset list based on the technique described by Newell and Gjerloev (2011a) was obtained from the SuperMAG website (https://supermag.jhuapl.edu/). We selected isolated substorm events where there is no other substorm onset in the preceding 6 hr and following 12 hr. A total of 1947 isolated substorm events have been identified during 1995–2018. Figure 4a shows the variation of the AE index averaged over those substorm events. The average AE index peaks approximately 1 hr after the onset, and decays gradually to go back to the pre-onset level in 3–4 hr. The average ap30 and ap60 indices (Figures 4b and 4c) show the increase and decrease of geomagnetic activity that occur within 3 hr around the substorm onset. ap (Figure 4d) is not able to fully resolve such a short-term variation due to its low time resolution. The results suggest that variation of Hpo within 3 hr can contain physically meaningful information, which is not resolved by Kp. Details are in the caption following the image Figure 4 Open in figure viewer PowerPoint Superposed epoch analysis of (a) AE, (b) ap30, (c) ap60, and (d) Kp over 1947 isolated substorm events identified during 1995–2018 based on the method of Newell and Gjerloev (2011a). Error bars represent the standard error of the mean. Zero in the horizontal axis corresponds to the substorm onset. 4 Summary and Outlook We have described a group of new open-ended geomagnetic activity indices Hpo. Hourly (Hp60) and half-hourly (Hp30) indices, along with their linearly scaled counterparts (ap30 and ap60), are available in near real time from the GFZ website (https://www.gfz-potsdam.de/en/hpo-index) with DOI (Matzka et al., 2022). Important properties of Hpo that are revealed by our initial analysis can be summarized as follows: The frequency distributions of the occurrence of Hp30 and Hp60 values are consistent with that of Kp at different phases of the solar cycle (Figures 1a–1i). 
Month-to-month variations of Hp30 and Hp60 are consistent with that of Kp (Figure 1k). The relationships between Hpo indices and Newell's solar wind coupling function, AE and PC indices are similar to those between Kp and these three quantities. Hp30 and Hp60 can capture temporal variation of Kp during strong geomagnetic storm events (Figure 3). Hp30 and Hp60 can reproduce short-term variation of geomagnetic activity within 3 hr associated with substorms (Figure 4). These results demonstrate that Hpo can be used as a higher time-resolution alternative to Kp. Indeed, there are already a few studies that utilized Hpo for its advantage over Kp. Yamazaki et al. (2021) used Hp30 to select quiet-time measurements of the geomagnetic field from Swarm satellites. The orbital period of a Swarm satellite is approximately 90 min, thus using Hp30, geomagnetic activity can be evaluated for every one third of the orbit, while there is only one Kp value for every two orbits. The high-cadence output of Hpo enables a more accurate selection of quiet-time data than the three-hourly Kp index. Bruinsma and Boniface (2021) used Hp60 to drive a recent version of the Drag Temperature Model, DTM-2020, which is a semi-empirical model of the Earth's thermosphere, developed for orbit determination and prediction of spacecraft and debris. They showed that the use of Hpo leads to the improvement of the model compared with the predecessor model DTM-2013 (Bruinsma, 2015) that is driven by Kp. Similarly, Hpo may be used for improving other geospace models driven by Kp. Recalibration and validation are recommended when Hpo is used as an input for existing models that are parameterized with Kp. |
2024.05.11 08:46 Superb_Option_3148 Holy Trine🕉Moola Trikona
The 1st, 5th, and 9th houses in a birth chart are considered the "holy trine" in Nadi astrology. These houses represent fundamental aspects of human existence: submitted by Superb_Option_3148 to Vedanga_Nadi_Jyotish [link] [comments] 1st House: Self, identity, physical body 5th House: Creativity, children, pleasure 9th House: Dharma (righteousness), father, long journeys, higher knowledge Their strong placement in a birth chart is believed to be auspicious. In the age-old wisdom of Nadi astrology, the 1st, 5th, and 9th houses are considered to be "MoolaTriKonam," prime and most pious, as well as an absolute necessity to sustain life in human form with an embodied body as a vehicle. Vedanga Jyotish is from the Vedas, and the Vedic Gods, both in male as well as female archetypes, describe triplicity in a plethora of canons from Brahma, Vishnu, Maheshwar, etc... Nine planets in Vedic astrology are assigned with three nakshatras each, and the formulation of Vimshottrai nakshatra dasha years allocation is done 3 * 120° = 360° zodiac. Vimshottri Dasha timekeeping relies on Earth's rotation to determine the passage of time (hours, minutes, seconds). Historically, the Moon's phases were used for timekeeping. The synodic month (time between full moons) is a natural unit that many cultures observed. However, modern timekeeping primarily relies on the Earth's rotation on its axis, which is a more consistent and precise measurement. Time is calculated using the space between 🌎 & Moon basis Moon's route. In Vedanga Jyotish, time is nothing but the passage of Moon along the dynamic always in the move Rashi Mandalam ♈ to ♓. Nakshatras and the Moon's Path: The concept of nakshatras as fixed positions along the ecliptic (Moon's apparent path) is accurate. From Earth's perspective, the Moon appears to traverse these constellation layers too, beneath the dynamic zodiac layer. Rashi Mandalam is the static Nakshatra Mandalam. 
The Moon's elliptical path does make the time it takes to cross each nakshatra vary slightly. Twenty-seven nakshatras from the Moon's elliptical path from the point of view of Earth, any given nakshatra runs 24-27½ hours. For our practical purposes, we are using a 24-hour uniform day basis, UTC. Using latitude & longitude from a given point on our planet 🌎, if we map the Moon using Vedanga Jyotish, the nakshatras are fixed; this layer doesn't move, but the Moon's elliptical path passing through, not in a uniform manner from Earth's point of view. Thus, Vimshottrai dasha system has unequal year allocation to planetary dasha: Ketu 7 years, Venus 20 years, Sun 6 years, Moon 10 years, Mars 7 years, Rahu 18 years, Guru 16 years, Shani 19 years, Mercury 17 years. Ashwini - Beta Arietis, Bharani - 35 Arietis, Krittika - Pleiades (also known as the Seven Sisters or Messier 45), Rohini - Aldebaran (Alpha Tauri), Mrigashira - Lambda Orionis, Ardra - Betelgeuse (Alpha Orionis), Punarvasu - Castor (Alpha Geminorum), Pushya - Theta Cancri, Ashlesha - Epsilon Hydrae, Magha - Regulus (Alpha Leonis), Purva Phalguni - Delta Leonis, Uttara Phalguni - Beta Leonis, Hasta - Delta Corvi, Chitra - Spica (Alpha Virginis), Swati - Arcturus (Alpha Bootis), Vishakha - Alpha Librae, Anuradha - Delta Scorpii, Jyeshtha - Alpha Scorpionis, Mula - Lambda Scorpii, Purva Ashadha - Delta Sagittarii, Uttara Ashadha - Sigma Sagittarii, Shravana - Alpha Aquilae, Dhanishta - Alpha Delphini, Shatabhisha - Lambda Aquarii, Purva Bhadrapada - Alpha Pegasi, Uttara Bhadrapada - Gamma Pegasi, Revati - Zeta Piscium. Here's where the logic gets a bit more complex: Vimshottri Dasha assigns varying lengths to planetary periods based on using latitude & longitude computing Moon position. We work our time in Earth-like ° ' ". Equal hours, minutes, seconds. 
So the point I am trying to underscore is if we compute from the Moon the distance using latitude and longitude of 27 nakshatras in our Milky Way from Earth's perspective, those emerging ° ' ", we get the logic behind Vimshottrai dasha splits duration in years. Here, if you can think in real-time of the Moon's position in the sidereal sky ° ‘ “ which is perceived as an elliptical clock for measurement 360° & this is what corresponds with HH:MM:SS with our human modern-day ⏰ |
2024.05.11 00:36 Many-Director3375 Value changes if print() is called before return statement
final PolylabelResult center = polylabel([centers]); print(center); // printed value = PolylabelResult(Point(48.958880595, -0.28536779505776355), distance: 0.0003213244381694669) return LatLng(center.point.x.toDouble(), center.point.y.toDouble()); Then the same flutter code, version 2, without print():
final PolylabelResult center = polylabel([centers]); return LatLng(center.point.x.toDouble(), center.point.y.toDouble()); The value returned by version 1 is: LatLng(latitude:48.958881, longitude:-0.285368)
2024.05.10 02:13 Robd63 Horizontal Coriolis Misunderstanding
2024.05.09 05:37 silentdawe01 AHK V1 Checksum Calculation XOR
cs1 ^= currByte} MsgBox % test2 " " cs1
2024.05.08 14:21 Alexechr [Request] How fast would I need to travel north to keep the sun in the same place?
2024.05.08 08:15 car_civteach20 Trying to understand triangulation
2024.05.07 19:23 ConsciousRun6137 Missed or Dismissed
Well, quickly, here’s my story... submitted by ConsciousRun6137 to u/ConsciousRun6137 [link] [comments] Archaeology fever first seized me at the age of ten. I was intrigued by British explorer Percy Fawcett's amazing trek into the Amazon jungle. After reporting his discovery of a dead, ancient, vine-choked city, he went back in... and vanished! You know what? My first expedition was into that same unexplored region … where pygmies shrank human heads to the size of your fist! https://preview.redd.it/odvg2u9nc1zc1.png?width=170&format=png&auto=webp&s=392096f851a2180ecba376981446009ab60f4691 This search for ancient mysteries was to take me through more than 30 countries. Soon I began to stumble upon something that truly shocked me!... you would call them “out of place” artifacts. Did I say shocked? They blew me over! Because, according to what we were taught in school, these should never exist! And they weren’t just in one place. There was a global pattern to them. his pattern showed a lost super science and technology. That’s when I knew someone had to speak up. I knew this content was of tremendous value. For example: EGYPT 2000 BCDid you know that the Egyptians bored into granite rock with drills that turned 500 times faster than modern power drills?And how about this?: * Coin-slot machines * Steam engines * Speedometers * Machine-cut optical lenses * Screen projectors Did you know that Egyptian dentists worked with cement fillings, dental bridges... and inserted artificial teeth? That William Deiches found working diagrams in Tutankhamen's tomb (1545 BC) which helped him to build real model plane that flew? That Egyptian doctors performed pregnancy tests, determined the sex of an unborn child, fed nourishment through tubes, and fitted artificial legs and hands? That they used anaesthetic and sophisticated instruments to perform bone and brain surgery? 
What about old Egypt’s mysterious moving walls, automatically flashing lights and lamps that shone century after century, non-stop? And how were sound waves used to open doors in ancient Karnak, Abydon and Thebes? CHINADid you know that about 1000 BC nerve gas was used in warfare?That 2,500 years ago "mirrors" were invented which, set up in pairs, could transmit messages, like television? THE BIGGEST PYRAMID: One hundred pyramids have been discovered in Shensi Province of China. The largest – according to one claim 1,200 feet high, 2½ times the height of Egypt’s Great Pyramid – could, if hollow, swallow 26 Empire State buildings. https://preview.redd.it/w0cn3os2e1zc1.png?width=150&format=png&auto=webp&s=bac08cd1df7c724dcdfda6f75997092e7e7e3b75 Did you know that X-rays were used in ancient China? And heart transplant operations were carried out? And what about the 716 rhythmically pulsating “electric” disks (similar to computer disks) found in caves in the Bayan-Kara-Ula mountains? THE AMERICAS Have you heard how Captain Don Henry discovered a 400 foot pyramid on the seabed off Florida? Did you know that a Chinese expedition surveyed all of North America in 2200 BC. They saw a sunrise over the Grand Canyon, black opals and gold nuggests in Nevada, and seals frolicking in San Francisco Bay! And would you believe the ancient Maya used screw propellors, had books with gold leaves... and even diving suits? Have you heard about the Mayan surgical instruments that were a thousand times sharper than modern platinum-plus blades? And Mayan dental crowns and fillings still hold after 1,500 years! How does a bird in the Andes turn hard rock to be soft like putty? Did the ancients know this formula? And did you know that the Inca soaked in bathtubs of gold and silver, fed by water pipes of silver and gold? That in the city of Pachacamac in Peru the temple stones were fastened with gold nails that weighed a ton! 
That in 1963, Peruvian surgeon Francisco Grano saved a patient’s life, using surgical instruments from a 3,000 year old tomb? And how about this?: On a plain in Peru can be found 4-dimensional art with several faces that disappear or change into other figures, according to your position or that of the sun. And this? From a dead, vine-choked jungle city, have been brought out exquisitely stamped 30 foot long rolls of sheet metal “wall paper” jointed together with tiny rivets! Then there is the mystery of that vast artificial tunnel in a remote corner of Ecuador, which was explored by astronaut Neil Armstrong. And did you know that an Amazon tribe, since the 1970s, has escaped the encroaching world by retreating to an abandoned underground city! THE MEDITERRANEAN Have you heard of ancient Greece’s chemical warfare weapon – fire that burned in water – which was self-igniting? Did you know about their devices for the automatic opening and closing of doors! Their sewage system from every house, as good as any today? And how about the computer that calculated the positions of the planets, the rising and setting of the moon, tide movements and time of day… and could establish a ship’s position anywhere on earth out of sight of land, or at night. Precise mechanics, as good as any we can produce today. 55 BC And what do you think about these?: - Drills that bored holes finer than the thinnest needle - Taxis with speedometers - Shatter proof sheet glass - A machine tool for cutting screws - A machine for boring tunnels - Petrol vapor machines - A human-like robot that could walk… and almost ran away! And luxury ocean liners with swimming pools. RUSSIA Have you heard of the 15 minute “movie” of luminous pictures that activates each day as the sun sets over the Onega River in Russia? MICRO TECHNOLOGY: 40 feet underground in Russia’s Ural region, gold prospectors are finding ANCIENT, man-made, spiral-shaped artifacts. 
These microscopically TINY artifacts are the product of some inexplicable and highly advanced technology. They resemble control elements used in micro-miniature devices in our latest technology “nano-machines”. BABYLON, FERTILE CRESCENT And what about these?: - Protective face masks for patients undergoing radiation therapy - Eye cataract operations - Electric dry-cell batteries - Use of sound waves to lift heavy weights MASSIVE WEIGHTS LIFTED: Ancient 2,000 ton stone building blocks in Lebanon were raised 20 feet above the ground. No modern crane can budge, lift or transport, such titanic blocks. AUSTRALIA A wrecked Phoenician ship has been found on the West Australian coast. And other Phoenician evidence is found around the country. Near Sarina, Queensland, Val Osborne has discovered remains of a Phoenician port and mine from 900 BC. Traditional theory says this can’t be! So with all the publicity, the Central Queensland University reportedly sent an archaeologist to the site, instructing him to ‘find nothing’. Osborne commented, “It’s like that – when they don’t want the truth to be known, they will deny it. Numerous traces of Egyptian colonisation in Australia have also been unearthed or discovered, such as coins, statues, inscriptions and a tomb, as well as Egyptian customs, religious features and words among the Aborigines.… Oh yes, and pyramids. INDIA, PAKISTAN How about this? Advanced mathematics, with measurements in micro-fractions – down to the disintegration rates of sub-atomic particles (1/300 millionth of a second). I ask you, how could these have been measured without precision instruments? And why did they find it necessary to measure them? https://preview.redd.it/1g9unwarf1zc1.png?width=150&format=png&auto=webp&s=d3e7233a21c04e52f22be0cb52722b9c605c3876 Why were 44 skeletons dug up in Pakistan found to be radioactive? 2000 BC MAN TURNED TO GLASS: In a forest of NE India, explorer-hunter H.J. 
Hamilton received a substantial shock when he entered ancient ruins. On a chair, made of the same ‘crystal’ as the walls, an odd shape was crouching, with vaguely human features. At first he thought it to be a statue damaged by time. But then he was filled with horror: under the ‘glass’ which covered that ‘statue’ a skeleton could clearly be seen! Melted, crystallized! Anything else? Plenty: - Upstairs bedrooms having en suite bathrooms with hot and cold running water on tap, and flush toilets. - Aluminium cups, thimbles, and so on - Plastic surgery, including nose transplants - Fluoroscopy (X-ray devices) - Chemical and biological warfare, and nerve gas THE WORLD Discover the two 70 foot towers in Ahmedabad, that sway to and fro in rhythm with each other. Were you aware that in at least 64 scientific achievements an ancient civilization surpassed us? https://preview.redd.it/co3smyy6g1zc1.png?width=150&format=png&auto=webp&s=bba6f44d0f00ee90a4f38464692f47aabf382773 https://preview.redd.it/bi6664q7g1zc1.png?width=150&format=png&auto=webp&s=1a1a91bed45b2bb796543c35e0d85492dd4303c3 ANCIENT WORLD SURVEY?: Although Antarctica’s existence was not verified until 1819, an ancient map shows that continent as it was BEFORE the ice covered it – with rivers and fjords precisely where today mile-thick glaciers flow, and a mountain range undiscovered until 1952. Longitude and latitude are shown correctly, with a grid system similar to modern air navigation maps. 2000 BC. Did you know about the mystery satellite orbiting the poles, but not put there by any modern nation? That man knew the secret of flight before the 20th century? That early cave men wore “modern” clothes like ours? And what about the cities illuminated by means of electricity unknown to us today? Do you know what 4,000 year old aerial surveillance techniques were used in the Iraq war? http://www.beforeus.com/ |