ARC environment classes

ARC Fitness

Factory


source

ARCFitnessFactory

 ARCFitnessFactory ()

Initialize self. See help(type(self)) for accurate signature.

BaseARCFitness


source

BaseARCFitness

 BaseARCFitness ()

Base class of an ARCFitness. This class is not used directly by developers, but it defines the functionality common to all fitness classes.
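Concrete fitness classes are callables: as the SumSquareOfDiff traceback further down shows, they implement __call__(output_array, env_array, control_set). Below is a minimal sketch of a custom fitness built on that signature; the class name and the mean-absolute-difference metric are purely illustrative and not part of the library.

import numpy as np

class MeanAbsoluteDiff(BaseARCFitness):
    "Hypothetical fitness: mean absolute cell difference between two equally sized arrays."
    def __call__(self, output_array, env_array, control_set):
        metric = 0
        if 'cells' in control_set:
            # Assumes both arrays already have the same shape
            diff = np.asarray(env_array, dtype=float) - np.asarray(output_array, dtype=float)
            metric = float(np.mean(np.abs(diff)))
        return metric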

SumSquareOfDiff


source

SumSquareOfDiff

 SumSquareOfDiff ()

A function that sums the square of the differences of two arrays.
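In numpy terms the metric is essentially the sum of the element-wise squared differences. A quick illustration of the arithmetic (not the class implementation, which, as the traceback further down shows, also handles NaN cells and slices the environment array to the output's shape):

import numpy as np

a = np.array([[0, 7], [7, 7]], dtype=float)
b = np.array([[0, 5], [7, 0]], dtype=float)
# (0-0)^2 + (7-5)^2 + (7-7)^2 + (7-0)^2 = 0 + 4 + 0 + 49
print(np.sum((a - b) ** 2))   # 53.0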

AverageMaxOfDiff


source

AverageMaxOfDiff

 AverageMaxOfDiff ()

A function that averages the maximum of the differences of two arrays.

Euclidean


source

Euclidean

 Euclidean ()

A function that computes the Euclidean distance between two arrays.

import numpy as np

np.random.seed(1)
# Create a 3 by 3 numpy array of random floats between 4 and 5
random_array = np.random.uniform(4, 5, (3, 3))
print(random_array)
# Reference grid of all fours to compare against
array_of_fours = np.full((3, 3), 4)
print(array_of_fours)
euc = Euclidean()
metric = euc(array_of_fours, random_array,  ['cells'])
print(metric)
np.random.seed(2)
random_array = np.random.uniform(4.49, 4.5, (3, 3))
print(random_array)
metric = euc(array_of_fours, random_array,  ['cells'])
print(metric)
[[4.417022   4.72032449 4.00011437]
 [4.30233257 4.14675589 4.09233859]
 [4.18626021 4.34556073 4.39676747]]
[[4 4 4]
 [4 4 4]
 [4 4 4]]
1.0610244787055576
[[4.49435995 4.49025926 4.49549662]
 [4.49435322 4.49420368 4.49330335]
 [4.49204649 4.49619271 4.49299655]]
1.4810793133120121
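The metric agrees with the Euclidean (L2/Frobenius) distance between the two grids, which can be checked directly with numpy:

import numpy as np

np.random.seed(1)
random_array = np.random.uniform(4, 5, (3, 3))
array_of_fours = np.full((3, 3), 4)
# Square root of the sum of squared differences, matching the Euclidean() result above
print(np.linalg.norm(random_array - array_of_fours))   # 1.0610244787055576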

ARCDataProcessor


source

ARCDataProcessor

 ARCDataProcessor (config_dict, arc_dict)

Initialize self. See help(type(self)) for accurate signature.
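As the examples below illustrate, config_dict selects how the ARC task drives the environment ('control_set', 'input_set', the 'dataset' to draw from and the 'index' of the task pair), while arc_dict holds an ARC task in its usual 'train'/'test' format of 'input'/'output' grids.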

# Example usage:
print('Example using dims only')
config_dict = {
    'control_set': ['dims'],
    'input_set': ['env'],
    'dataset': 'train',
    'index': 0
}

arc_dict = {
    'test': [{'input': [[7, 0, 7], [7, 0, 7], [7, 7, 0]]}],
    'train': [
        {'input': [[0, 7, 7], [7, 7, 7], [0, 7, 7]], 'output': [[0, 0, 0, 0, 7, 7, 0, 7, 7], [0, 0, 0, 7, 7, 7, 7, 7, 7], [0, 0, 0, 0, 7, 7, 0, 7, 7], [0, 7, 7, 0, 7, 7, 0, 7, 7], [7, 7, 7, 7, 7, 7, 7, 7, 7], [0, 7, 7, 0, 7, 7, 0, 7, 7], [0, 0, 0, 0, 7, 7, 0, 7, 7], [0, 0, 0, 7, 7, 7, 7, 7, 7], [0, 0, 0, 0, 7, 7, 0, 7, 7]]}
        # Add more entries as needed
    ]
}

gp = ARCDataProcessor(config_dict, arc_dict)
info = gp.create_info()
print(info)
ins = gp.get_env_inputs_names()
print('names', ins)
inds = gp.get_env_inputs_indexes()
print('indexes', inds)

state, info = gp.get_state()
print(info)
print('fitness', gp.fitness_function(), state)
print()

for i in range(-2,-4,-1):
    actions = [i]
    gp.apply_actions(actions)
    state, info = gp.get_state()
    print(info)
    print('fitness', gp.fitness_function(), state)
    print()
Example using dims only
{'num_actions': 1, 'grid_shape': 'equal', 'dims': 1}
names ['IWE']
indexes [0]
{'num_actions': 1, 'grid_shape': 'equal', 'dims': 1}
fitness 72 {'inputs': {'dims': {'env': (3,)}}}

{'num_actions': 1, 'grid_shape': 'equal', 'dims': 1}
fitness 128 {'inputs': {'dims': {'env': (1,)}}}

{'num_actions': 1, 'grid_shape': 'equal', 'dims': 1}
fitness 128 {'inputs': {'dims': {'env': (1,)}}}
# Example usage:
print('Example using env inputs only')
config_dict = {
    'control_set': ['cells'],
    'input_set': ['env'],
    'dataset': 'train',
    'index': 0
}

gp = ARCDataProcessor(config_dict, arc_dict)
info = gp.create_info()
print(info)
ins = gp.get_env_inputs_names()
print('names', ins)
inds = gp.get_env_inputs_indexes()
print('indexes', inds)

state, info = gp.get_state()
print(info)
print('fitness', gp.fitness_function(), state)
print()

for i in range(-2,-4,-1):
    actions = [i for j in range(info['num_actions'])]
    print(actions)
    gp.apply_actions(actions)
    state, info = gp.get_state()
    print(info)
    print('fitness', gp.fitness_function(), state)
    print()
Example using env inputs only
{'num_actions': 9, 'env': (3, 3)}
names ['IE001', 'IE002', 'IE003', 'IE004', 'IE005', 'IE006', 'IE007', 'IE008', 'IE009']
indexes [0, 1, 2, 3, 4, 5, 6, 7, 8]
{'num_actions': 9, 'env': (3, 3)}
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
Cell In[52], line 21
     19 state, info = gp.get_state()
     20 print(info)
---> 21 print('fitness', gp.fitness_function(), state)
     22 print()
     24 for i in range(-2,-4,-1):

Cell In[50], line 225, in ARCDataProcessor.fitness_function(self)
    223 output_array = self.get_output(self.dataset)
    224 env_array = self.env
--> 225 return self.fitness_function_arrays(output_array, env_array)

Cell In[50], line 196, in ARCDataProcessor.fitness_function_arrays(self, output_array, env_array)
    193 def fitness_function_arrays(self, output_array, env_array):
--> 196     return self.fitness_function_type(output_array = output_array, env_array = env_array, control_set = self.control_set)

Cell In[46], line 12, in SumSquareOfDiff.__call__(self, output_array, env_array, control_set)
     10 element_metric = 0
     11 if 'cells' in control_set:
---> 12     diff = env_array[:output_array.shape[0], :output_array.shape[1]] - output_array
     13     diff = np.where(np.isnan(diff), 0, diff)
     14     diff = np.where(np.isnan(env_array[:output_array.shape[0], :output_array.shape[1]]), 10, diff)

ValueError: operands could not be broadcast together with shapes (3,3) (9,9) 
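The failure is a shape mismatch rather than a problem with the example data: with control_set=['cells'], SumSquareOfDiff slices the environment array to the output's shape, but slicing the 3 by 3 env with [:9, :9] still yields a 3 by 3 array, which cannot be broadcast against the 9 by 9 training output. A minimal reproduction of the numpy behaviour:

import numpy as np

env = np.zeros((3, 3))
output = np.zeros((9, 9))
# Slicing cannot grow an array: the result is still (3, 3)
print(env[:output.shape[0], :output.shape[1]].shape)
# env[:9, :9] - output  # would raise the same ValueError: shapes (3,3) (9,9)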
# Example usage:
print('Example using cells only')
config_dict = {
    'control_set': ['cells'],
    'input_set': ['env','inputs'],
    # 'input_set': ['env','inputs', 'outputs'],
    'dataset': 'train',
    'index': 0
}

gp = ARCDataProcessor(config_dict, arc_dict)
info = gp.create_info()
print(info)

ins = gp.get_env_inputs_names()
print('names', ins)
inds = gp.get_env_inputs_indexes()
print('indexes', inds)

state, info = gp.get_state()
print(info)
print('fitness', gp.fitness_function(), state)
print()
Example using cells only
{'num_actions': 9, 'env': (3, 3), 'inputs': (3, 3)}
names ['IE001', 'IE002', 'IE003', 'IE004', 'IE005', 'IE006', 'IE007', 'IE008', 'IE009', 'II001', 'II002', 'II003', 'II004', 'II005', 'II006', 'II007', 'II008', 'II009']
indexes [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]
{'num_actions': 9, 'env': (3, 3), 'inputs': (3, 3)}
fitness 2143.0 {'inputs': {'cells': {'env': array([[0., 7., 7.],
       [7., 7., 7.],
       [0., 7., 7.]], dtype=float32), 'inputs': array([[0, 7, 7],
       [7, 7, 7],
       [0, 7, 7]])}}}

ARCEnv


source

ARCEnv

 ARCEnv (namespace='')

*The main OpenAI Gym class.

It encapsulates an environment with arbitrary behind-the-scenes dynamics. An environment can be partially or fully observed.

The main API methods that users of this class need to know are:

  • :meth:step - Takes a step in the environment using an action returning the next observation, reward, if the environment terminated and observation information.
  • :meth:reset - Resets the environment to an initial state, returning the initial observation and observation information.
  • :meth:render - Renders the environment observation with modes depending on the output
  • :meth:close - Closes the environment, important for rendering where pygame is imported

And set the following attributes:

  • :attr:action_space - The Space object corresponding to valid actions
  • :attr:observation_space - The Space object corresponding to valid observations
  • :attr:reward_range - A tuple corresponding to the minimum and maximum possible rewards
  • :attr:spec - An environment spec that contains the information used to initialise the environment from gym.make
  • :attr:metadata - The metadata of the environment, i.e. render modes
  • :attr:np_random - The random number generator for the environment

Note: a default reward range set to :math:(-\infty,+\infty) already exists. Set it if you want a narrower range.*

Example

import os
from time import sleep

import pygame

# Example usage:
if os.name == 'nt': 
    props = {'dir': 'C:\\packages\\arc-prize-2024', 'file_prefix':'arc-agi_training_', 'code':'007bbfb7', 'control_set': ['dims'], 'input_set': ['env'],'dataset': 'train'}
    file_name = os.path.join(props['dir'], props['file_prefix']) + 'challenges.json' 
    challenges_manager = ChallengesDataManager(file_name)
    data = challenges_manager.get_data_for_key(props['code'])
    print(data)
    arc_env = ARCEnv()
    arc_env.initialise(props, data)
    arc_env.render()
    for i in range(6):
        state, fitness, done, info = arc_env.step([1])
        print(state, fitness, done, info)
        arc_env.render()
        sleep(1)
{'test': [{'input': [[7, 0, 7], [7, 0, 7], [7, 7, 0]]}], 'train': [{'input': [[0, 7, 7], [7, 7, 7], [0, 7, 7]], 'output': [[0, 0, 0, 0, 7, 7, 0, 7, 7], [0, 0, 0, 7, 7, 7, 7, 7, 7], [0, 0, 0, 0, 7, 7, 0, 7, 7], [0, 7, 7, 0, 7, 7, 0, 7, 7], [7, 7, 7, 7, 7, 7, 7, 7, 7], [0, 7, 7, 0, 7, 7, 0, 7, 7], [0, 0, 0, 0, 7, 7, 0, 7, 7], [0, 0, 0, 7, 7, 7, 7, 7, 7], [0, 0, 0, 0, 7, 7, 0, 7, 7]]}, {'input': [[4, 0, 4], [0, 0, 0], [0, 4, 0]], 'output': [[4, 0, 4, 0, 0, 0, 4, 0, 4], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 4, 0, 0, 0, 0, 0, 4, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 4, 0, 4, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 4, 0, 0, 0, 0]]}, {'input': [[0, 0, 0], [0, 0, 2], [2, 0, 2]], 'output': [[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 2], [0, 0, 0, 0, 0, 0, 2, 0, 2], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 2, 0, 0, 0, 0, 0, 2], [2, 0, 2, 0, 0, 0, 2, 0, 2]]}, {'input': [[6, 6, 0], [6, 0, 0], [0, 6, 6]], 'output': [[6, 6, 0, 6, 6, 0, 0, 0, 0], [6, 0, 0, 6, 0, 0, 0, 0, 0], [0, 6, 6, 0, 6, 6, 0, 0, 0], [6, 6, 0, 0, 0, 0, 0, 0, 0], [6, 0, 0, 0, 0, 0, 0, 0, 0], [0, 6, 6, 0, 0, 0, 0, 0, 0], [0, 0, 0, 6, 6, 0, 6, 6, 0], [0, 0, 0, 6, 0, 0, 6, 0, 0], [0, 0, 0, 0, 6, 6, 0, 6, 6]]}, {'input': [[2, 2, 2], [0, 0, 0], [0, 2, 2]], 'output': [[2, 2, 2, 2, 2, 2, 2, 2, 2], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 2, 2, 0, 2, 2, 0, 2, 2], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 2, 2, 2, 2, 2, 2], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 2, 2, 0, 2, 2]]}]}
{'inputs': {'dims': {'env': (4,)}}} 50 True {'num_actions': 1, 'grid_shape': 'equal', 'dims': 1}
{'inputs': {'dims': {'env': (5,)}}} 32 True {'num_actions': 1, 'grid_shape': 'equal', 'dims': 1}
{'inputs': {'dims': {'env': (6,)}}} 18 True {'num_actions': 1, 'grid_shape': 'equal', 'dims': 1}
{'inputs': {'dims': {'env': (7,)}}} 8 True {'num_actions': 1, 'grid_shape': 'equal', 'dims': 1}
{'inputs': {'dims': {'env': (8,)}}} 2 True {'num_actions': 1, 'grid_shape': 'equal', 'dims': 1}
{'inputs': {'dims': {'env': (9,)}}} 0 True {'num_actions': 1, 'grid_shape': 'equal', 'dims': 1}
sleep(2)
arc_env.close()
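For reference, the interaction above follows the standard Gym loop of step, render and close. A minimal sketch, assuming props and data have been built as in the example and that step returns (state, fitness, done, info) as shown:

# Re-create the environment and act until it reports done (capped at 10 steps)
arc_env = ARCEnv()
arc_env.initialise(props, data)
done, steps = False, 0
while not done and steps < 10:
    state, fitness, done, info = arc_env.step([1])   # a single 'dims' action, as above
    arc_env.render()
    steps += 1
arc_env.close()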