complexity int64 1 56 | n_identifiers int64 1 114 | code stringlengths 19 12.7k | path stringlengths 8 134 | n_ast_nodes int64 12 2.35k | ast_errors stringlengths 0 4.01k | repo stringlengths 3 28 | documentation dict | n_words int64 2 866 | language stringclasses 1
value | vocab_size int64 2 323 | commit_id stringlengths 40 40 | file_name stringlengths 5 79 | id int64 243 338k | nloc int64 1 228 | token_counts int64 5 1.4k | fun_name stringlengths 1 77 | url stringlengths 31 60 | commit_message stringlengths 3 15.3k | n_whitespaces int64 1 3.23k | n_ast_errors int64 0 20 | d_id int64 74 121k | ast_levels int64 4 29 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
10 | 26 | def functions_df() -> pd.DataFrame:
modules = all_view_models()
all_formatted = []
for module in modules:
if not FORECASTING and "forecast" in str(module):
continue
loaded = load_modules(module)
# Gets all of a module's functions, but ignores imported functions
... | openbb_terminal/core/scripts/sdk_audit.py | 286 | OpenBBTerminal | {
"docstring": "Creates a dataframe for all functions in 'models' and 'views'.\n\n Returns:\n ----------\n pd.DataFrame\n Information for all view and model functions\n ",
"language": "en",
"n_whitespaces": 39,
"n_words": 20,
"vocab_size": 16
} | 109 | Python | 78 | 963ca9b2b924d0514e0e65243dc8d9d7af023ad1 | sdk_audit.py | 286,657 | 32 | 169 | functions_df | https://github.com/OpenBB-finance/OpenBBTerminal.git | Audit SDK and View/Model functions (#3384)
* Initial commit
* Finalized functionality
* update script
* Allow using it without forecasting
* Update gitignore
* Update `sdk_audit.py`
* Fixed issues, found more
* Added fix for helper functions, and column for SDK type
* Checked one more thing
... | 267 | 0 | 85,962 | 13 | |
4 | 11 | def train_timer(self, do='start'):
if do == 'start':
self.pair_it_train += 1
self.begin_time_train = time.time()
elif do == 'stop':
end = time.time()
self.train_time += (end - self.begin_time_train)
if self.pair_it_train == self.total_... | freqtrade/freqai/freqai_interface.py | 147 | freqtrade | {
"docstring": "\n Timer designed to track the cumulative time spent training the full pairlist in\n FreqAI.\n ",
"language": "en",
"n_whitespaces": 36,
"n_words": 14,
"vocab_size": 13
} | 61 | Python | 47 | 96d8882f1e6740f6c0a859c6e5f52a5a30ddb007 | freqai_interface.py | 150,527 | 13 | 79 | train_timer | https://github.com/freqtrade/freqtrade.git | Plug mem leak, add training timer | 214 | 0 | 34,788 | 15 | |
5 | 17 | def caching_device(rnn_cell):
if tf.executing_eagerly():
# caching_device is not supported in eager mode.
return None
if not getattr(rnn_cell, '_enable_caching_device', False):
return None
# Don't set a caching device when running in a loop, since it is possible that
# train steps could be wrappe... | keras/layers/rnn/rnn_utils.py | 178 | keras | {
"docstring": "Returns the caching device for the RNN variable.\n\n This is useful for distributed training, when variable is not located as same\n device as the training worker. By enabling the device cache, this allows\n worker to read the variable once and cache locally, rather than read it every\n time step ... | 202 | Python | 119 | 01c906c4178db5ae03b7eb2d298a052c952a0667 | rnn_utils.py | 268,981 | 25 | 92 | caching_device | https://github.com/keras-team/keras.git | Reorganize RNN layers, cells and wrappers into smaller logically organized files hosted under an `rnn` directory.
PiperOrigin-RevId: 428841673 | 323 | 0 | 79,802 | 11 | |
1 | 5 | def get_tables(self) -> Response:
q = 'SHOW TABLES;'
return self.native_query(q)
| mindsdb/integrations/handlers/tdengine_handler/tdengine_handler.py | 34 | mindsdb | {
"docstring": "\n Get a list with all of the tabels in TDEngine\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 10,
"vocab_size": 10
} | 10 | Python | 10 | 257dfe6bac18d28088c7bfc79ca22cde682f9cd6 | tdengine_handler.py | 116,966 | 6 | 18 | get_tables | https://github.com/mindsdb/mindsdb.git | Added TDENgine Handler | 40 | 0 | 25,874 | 7 | |
3 | 9 | def get_instance(self) -> t.Optional[AnsibleCoreCI]:
if not self.core_ci and self.core_ci_state:
self.core_ci = self.create_core_ci(load=False)
self.core_ci.load(self.core_ci_state)
return self.core_ci
| test/lib/ansible_test/_internal/host_profiles.py | 80 | ansible | {
"docstring": "Return the current AnsibleCoreCI instance, loading it if not already loaded.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | 15 | Python | 13 | 3eb0485dd92c88cc92152d3656d94492db44b183 | host_profiles.py | 268,016 | 6 | 49 | get_instance | https://github.com/ansible/ansible.git | ansible-test - Use more native type hints. (#78435)
* ansible-test - Use more native type hints.
Simple search and replace to switch from comments to native type hints for return types of functions with no arguments.
* ansible-test - Use more native type hints.
Conversion of simple single-line function annota... | 58 | 0 | 79,290 | 11 | |
3 | 14 | def sort_line_bbox(g, bg):
xs = [bg_item[0] for bg_item in bg]
xs_sorted = sorted(xs)
g_sorted = [None] * len(xs_sorted)
bg_sorted = [None] * len(xs_sorted)
for g_item, bg_item in zip(g, bg):
idx = xs_sorted.index(bg_item[0])
bg_sorted[idx] = bg_item
g_sorted[idx] = g_... | ppstructure/table/table_master_match.py | 132 | PaddleOCR | {
"docstring": "\n Sorted the bbox in the same line(group)\n compare coord 'x' value, where 'y' value is closed in the same group.\n :param g: index in the same group\n :param bg: bbox in the same group\n :return:\n ",
"language": "en",
"n_whitespaces": 54,
"n_words": 35,
"vocab_size": 22
} | 41 | Python | 26 | ddaa2c2552e19635cd6cdf38619f1f176c358f89 | table_master_match.py | 24,491 | 10 | 85 | sort_line_bbox | https://github.com/PaddlePaddle/PaddleOCR.git | add SLANet | 83 | 0 | 4,742 | 11 | |
2 | 15 | def get_closed() -> pd.DataFrame:
bursa = all_bursa()
is_open_list = []
for exchange in bursa.index:
is_open = check_if_open(bursa, exchange)
is_open_list.append(is_open)
bursa["open"] = is_open_list
bursa = bursa.loc[~bursa["open"]]
return bursa[["name", "short_name"]]
@l... | openbb_terminal/stocks/tradinghours/bursa_model.py | 125 | @log_start_end(log=logger) | OpenBBTerminal | {
"docstring": "Get closed exchanges.\n\n Parameters\n ----------\n\n Returns\n -------\n pd.DataFrame\n Currently closed exchanges\n ",
"language": "en",
"n_whitespaces": 36,
"n_words": 11,
"vocab_size": 10
} | 29 | Python | 23 | 33a041e5bf93ce93ab1a19adbc5ed74c2f1eb337 | bursa_model.py | 284,462 | 19 | 66 | get_closed | https://github.com/OpenBB-finance/OpenBBTerminal.git | Trading hours stock feature (#1697) | 63 | 1 | 84,732 | 10 |
4 | 20 | def get_masks(slen, lengths, causal, padding_mask=None):
bs = shape_list(lengths)[0]
if padding_mask is not None:
mask = padding_mask
else:
# assert lengths.max().item() <= slen
alen = tf.range(slen, dtype=lengths.dtype)
mask = alen < tf.expand_dims(lengths, axis=1)
... | src/transformers/models/flaubert/modeling_tf_flaubert.py | 243 | transformers | {
"docstring": "\n Generate hidden states mask, and optionally an attention mask.\n ",
"language": "en",
"n_whitespaces": 16,
"n_words": 9,
"vocab_size": 9
} | 87 | Python | 58 | 31be02f14b1724c677bb2e32a5101c7cb6448556 | modeling_tf_flaubert.py | 33,795 | 17 | 162 | get_masks | https://github.com/huggingface/transformers.git | TF: tf.debugging assertions without tf.running_eagerly() protection (#19030) | 190 | 0 | 6,152 | 15 | |
2 | 19 | def css_install_check(app_configs, **kwargs):
errors = []
css_path = os.path.join(
os.path.dirname(__file__), 'static', 'wagtailadmin', 'css', 'normalize.css'
)
if not os.path.isfile(css_path):
error_hint = % css_path
errors.append(
Warning(
"CSS f... | wagtail/admin/checks.py | 136 | @register(Tags.admin) | wagtail | {
"docstring": "\n Most likely you are running a development (non-packaged) copy of\n Wagtail and have not built the static assets -\n see https://docs.wagtail.org/en/latest/contributing/developing.html\n\n File not found: %s\n ",
"language": "en",
"n_whitespaces... | 38 | Python | 32 | e9183a95c88fe2eaf4c1d3aff9833633509713f3 | checks.py | 70,553 | 21 | 73 | css_install_check | https://github.com/wagtail/wagtail.git | Update docs links to reference new domain | 147 | 1 | 15,519 | 13 |
4 | 4 | async def device_scan(hass, identifier, loop):
| homeassistant/components/apple_tv/config_flow.py | 18 | core | {
"docstring": "Scan for a specific device using identifier as filter.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | 5 | Python | 5 | 7112c5b52a1e0016961a725d4ca90b57ddb350de | config_flow.py | 310,881 | 9 | 77 | device_scan | https://github.com/home-assistant/core.git | Use zeroconf for scanning in apple_tv (#64528) | 8 | 0 | 109,552 | 6 | |
1 | 6 | def __exit__(self, *args) -> None:
raise NotImplementedError(
f"{self.__class__.__name__} does not support context management."
)
| src/prefect/blocks/abstract.py | 40 | prefect | {
"docstring": "\n Context management method for databases.\n ",
"language": "en",
"n_whitespaces": 20,
"n_words": 5,
"vocab_size": 5
} | 14 | Python | 14 | e51b790b7717e2603c1ea480c75e9ee02df3c869 | abstract.py | 60,156 | 7 | 17 | __exit__ | https://github.com/PrefectHQ/prefect.git | Abstract database block (#7866)
Co-authored-by: Alexander Streed <desertaxle@users.noreply.github.com>
Co-authored-by: Bill Palombi <bill@prefect.io> | 46 | 0 | 11,998 | 11 | |
2 | 31 | def split_spectrum(H, n, split_point, V0=None):
N, _ = H.shape
H_shift = H - split_point * jnp.eye(N, dtype=H.dtype)
U, _, _, _ = qdwh.qdwh(H_shift, is_hermitian=True, dynamic_shape=(n, n))
P = -0.5 * (U - _mask(jnp.eye(N, dtype=H.dtype), (n, n)))
rank = jnp.round(jnp.trace(P)).astype(jnp.int32)
V_minus... | jax/_src/lax/eigh.py | 323 | jax | {
"docstring": " The Hermitian matrix `H` is split into two matrices `H_minus`\n `H_plus`, respectively sharing its eigenspaces beneath and above\n its `split_point`th eigenvalue.\n\n Returns, in addition, `V_minus` and `V_plus`, isometries such that\n `Hi = Vi.conj().T @ H @ Vi`. If `V0` is not None, `V0 @ Vi` a... | 174 | Python | 95 | b64e36b60fca9661ca2c8ae51a56fae07bf5efe6 | eigh.py | 120,630 | 13 | 197 | split_spectrum | https://github.com/google/jax.git | Make QDWH-eig implementation jit-table.
Move QDWH-eig from jax._src.scipy.eigh to jax._src.lax.eigh, in preparation for using it to back `lax.eigh` in a future change.
PiperOrigin-RevId: 449362382 | 210 | 0 | 26,905 | 15 | |
2 | 7 | def set_dash_joinstyle(self, s):
js = JoinStyle(s)
if self._dashjoinstyle != js:
self.stale = True
self._dashjoinstyle = js
| lib/matplotlib/lines.py | 52 | matplotlib | {
"docstring": "\n How to join segments of the line if it `~Line2D.is_dashed`.\n\n The default joinstyle is :rc:`lines.dash_joinstyle`.\n\n Parameters\n ----------\n s : `.JoinStyle` or %(JoinStyle)s\n ",
"language": "en",
"n_whitespaces": 65,
"n_words": 22,
"vocab_si... | 16 | Python | 12 | 4c2662ad6f8c7b3c06554dfa3633f50dd011beb0 | lines.py | 107,221 | 5 | 31 | set_dash_joinstyle | https://github.com/matplotlib/matplotlib.git | DOC: Document default join style
in the same way as the default cap styles. | 55 | 0 | 22,649 | 9 | |
4 | 22 | def set_omp_num_threads_if_unset() -> bool:
num_threads_from_env = os.environ.get("OMP_NUM_THREADS")
if num_threads_from_env is not None:
# No ops if it's set
return False
# If unset, try setting the correct CPU count assigned.
runtime_ctx = ray.get_runtime_context()
if runtime... | python/ray/_private/utils.py | 189 | ray | {
"docstring": "Set the OMP_NUM_THREADS to default to num cpus assigned to the worker\n\n This function sets the environment variable OMP_NUM_THREADS for the worker,\n if the env is not previously set and it's running in worker (WORKER_MODE).\n\n Returns True if OMP_NUM_THREADS is set in this function.\n\n ... | 129 | Python | 94 | 7c8859f1428224710e4c2db2abf0d9ec28536301 | utils.py | 136,708 | 27 | 105 | set_omp_num_threads_if_unset | https://github.com/ray-project/ray.git | [core] Set OMP_NUM_THREADS to `num_cpus` required by task/actors by default (#30496)
Ray currently sets OMP_NUM_THREADS=1 when the environ variable is not set.
This PR:
Sets OMP_NUM_THREADS to the number of cpus assigned to the worker that runs a task before running, and reset it after running.
If num_cpus is a f... | 260 | 0 | 30,974 | 11 | |
4 | 31 | def grant_instance_level_collection_management_permissions(apps, schema_editor):
Collection = apps.get_model("wagtailcore.Collection")
Group = apps.get_model("auth.Group")
GroupCollectionPermission = apps.get_model("wagtailcore.GroupCollectionPermission")
Permission = apps.get_model("auth.Permissio... | wagtail/core/migrations/0066_collection_management_permissions.py | 296 | wagtail | {
"docstring": "\n Give the groups who currently manage all collections permission to manage root collections\n ",
"language": "en",
"n_whitespaces": 20,
"n_words": 13,
"vocab_size": 11
} | 64 | Python | 52 | d10f15e55806c6944827d801cd9c2d53f5da4186 | 0066_collection_management_permissions.py | 73,773 | 28 | 172 | grant_instance_level_collection_management_permissions | https://github.com/wagtail/wagtail.git | Reformat with black | 263 | 0 | 16,100 | 14 | |
1 | 9 | def test_no_default_policy(self) -> None:
room_id = self.helper.create_room_as(self.user_id, tok=self.token)
self._test_retention(room_id)
| tests/rest/client/test_retention.py | 53 | synapse | {
"docstring": "Tests that an event doesn't get expired if there is neither a default retention\n policy nor a policy specific to the room.\n ",
"language": "en",
"n_whitespaces": 36,
"n_words": 22,
"vocab_size": 20
} | 9 | Python | 9 | 1901cb1d4a8b7d9af64493fbd336e9aa2561c20c | test_retention.py | 247,062 | 6 | 32 | test_no_default_policy | https://github.com/matrix-org/synapse.git | Add type hints to `tests/rest/client` (#12084) | 30 | 0 | 71,472 | 10 | |
2 | 36 | def dag_bag_ext():
clear_db_runs()
dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
dag_0 = DAG("dag_0", start_date=DEFAULT_DATE, schedule_interval=None)
task_a_0 = EmptyOperator(task_id="task_a_0", dag=dag_0)
task_b_0 = ExternalTaskMarker(
task_id="task_b_0", external_da... | tests/sensors/test_external_task_sensor.py | 460 | @pytest.fixture | airflow | {
"docstring": "\n Create a DagBag with DAGs looking like this. The dotted lines represent external dependencies\n set up using ExternalTaskMarker and ExternalTaskSensor.\n\n dag_0: task_a_0 >> task_b_0\n |\n |\n dag_1: ---> task_... | 111 | Python | 69 | 49e336ae0302b386a2f47269a6d13988382d975f | test_external_task_sensor.py | 47,660 | 35 | 290 | dag_bag_ext | https://github.com/apache/airflow.git | Replace usage of `DummyOperator` with `EmptyOperator` (#22974)
* Replace usage of `DummyOperator` with `EmptyOperator` | 243 | 1 | 9,197 | 10 |
15 | 14 | def deserialize_keras_object(config, custom_objects=None):
| keras/saving/experimental/serialization_lib.py | 49 | """Retrieve the object by deserializing the config dict.
The config dict is a Python dictionary that consists of a set of key-value
pairs, and represents a Keras object, such as an `Optimizer`, `Layer`,
`Metrics`, etc. The saving and loading library uses the following keys to
record information of a Ke... | keras | {
"docstring": "Retrieve the object by deserializing the config dict.\n\n The config dict is a Python dictionary that consists of a set of key-value\n pairs, and represents a Keras object, such as an `Optimizer`, `Layer`,\n `Metrics`, etc. The saving and loading library uses the following keys to\n record... | 3 | Python | 3 | e3e3a428f0a7955040c8a8fb8b2ad6f3e16d29eb | serialization_lib.py | 279,741 | 54 | 281 | deserialize_keras_object | https://github.com/keras-team/keras.git | Remaster serialization logic.
There were several significant flaws, most prominently:
- We had 2 separate serialization systems partially overlapping and interacting with each other: the JSON encoder/decoder one, and serialize/deserialize_keras_objects. The new system is fully standalone.
- We ignored objects passed ... | 6 | 3 | 83,118 | 8 |
9 | 41 | def from_arrow(cls, at, index_cols=None, index=None, columns=None):
(
new_frame,
new_lengths,
new_widths,
unsupported_cols,
) = cls._partition_mgr_cls.from_arrow(at, return_dims=True)
if columns is not None:
new_columns = colu... | modin/experimental/core/execution/native/implementations/hdk_on_native/dataframe/dataframe.py | 396 | modin | {
"docstring": "\n Build a frame from an Arrow table.\n\n Parameters\n ----------\n at : pyarrow.Table\n Source table.\n index_cols : list of str, optional\n List of index columns in the source table which\n are ignored in transformation.\n in... | 117 | Python | 80 | 219edb5fb772609d3fafaac02ded0294ea434aa8 | dataframe.py | 155,368 | 39 | 258 | from_arrow | https://github.com/modin-project/modin.git | FIX-#4859: Add support for PyArrow Dictionary Arrays to type mapping (#5271)
Co-authored-by: Iaroslav Igoshev <Poolliver868@mail.ru>
Signed-off-by: Andrey Pavlenko <andrey.a.pavlenko@gmail.com> | 518 | 0 | 36,363 | 15 | |
1 | 16 | def test_retention_event_purged_with_state_event_outside_allowed(self) -> None:
room_id = self.helper.create_room_as(self.user_id, tok=self.token)
# Set a max_lifetime higher than the maximum allowed value.
self.helper.send_state(
room_id=room_id,
event_type=Eve... | tests/rest/client/test_retention.py | 174 | synapse | {
"docstring": "Tests that the server configuration can override the policy for a room when\n running the purge jobs.\n ",
"language": "en",
"n_whitespaces": 31,
"n_words": 17,
"vocab_size": 15
} | 100 | Python | 49 | 1901cb1d4a8b7d9af64493fbd336e9aa2561c20c | test_retention.py | 247,058 | 19 | 114 | test_retention_event_purged_with_state_event_outside_allowed | https://github.com/matrix-org/synapse.git | Add type hints to `tests/rest/client` (#12084) | 286 | 0 | 71,468 | 11 | |
2 | 7 | def raise_for_status(self) -> None:
try:
return super().raise_for_status()
except HTTPStatusError as exc:
raise PrefectHTTPStatusError.from_httpx_error(exc) from None
| src/prefect/client.py | 56 | prefect | {
"docstring": "\n Raise an exception if the response contains an HTTPStatusError.\n\n The `PrefectHTTPStatusError` contains useful additional information that\n is not contained in the `HTTPStatusError`.\n ",
"language": "en",
"n_whitespaces": 51,
"n_words": 22,
"vocab_size": 19
} | 15 | Python | 15 | f166d70fcfcdf4fceeb222f273b8e0eab6fb1b26 | client.py | 55,891 | 11 | 32 | raise_for_status | https://github.com/PrefectHQ/prefect.git | Create PrefectResponse | 58 | 0 | 11,416 | 11 | |
1 | 2 | def test(self):
| python/ray/tune/tests/test_tune_restore.py | 13 | ray | {
"docstring": "Trainable crashes with fail_fast flag and the original crash message\n should bubble up.",
"language": "en",
"n_whitespaces": 19,
"n_words": 13,
"vocab_size": 13
} | 2 | Python | 2 | b1496d235fce4f19fb53553e7fb78e97e1d19054 | test_tune_restore.py | 146,277 | 4 | 30 | test | https://github.com/ray-project/ray.git | [tune] fix error handling for fail_fast case. (#22982) | 9 | 0 | 33,646 | 6 | |
8 | 17 | def check_related_objects(self, field, value, opts):
if field.is_relation:
# Check that the field and the queryset use the same model in a
# query like .filter(author=Author.objects.all()). For example, the
# opts would be Author's (from the author field) and value.m... | django/db/models/sql/query.py | 165 | django | {
"docstring": "Check the type of object passed to query relations.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | 103 | Python | 71 | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | query.py | 205,870 | 16 | 104 | check_related_objects | https://github.com/django/django.git | Refs #33476 -- Reformatted code with Black. | 378 | 0 | 51,255 | 16 | |
1 | 6 | def single_source_dijkstra_path_length(G, source, cutoff=None, weight="weight"):
return multi_source_dijkstra_path_length(G, {source}, cutoff=cutoff, weight=weight)
| networkx/algorithms/shortest_paths/weighted.py | 50 | networkx | {
"docstring": "Find shortest weighted path lengths in G from a source node.\n\n Compute the shortest path length between source and all other\n reachable nodes for a weighted graph.\n\n Parameters\n ----------\n G : NetworkX graph\n\n source : node label\n Starting node for path\n\n cutof... | 10 | Python | 10 | d82815dba6c8ddce19cd49f700298dc82a58f066 | weighted.py | 177,506 | 2 | 33 | single_source_dijkstra_path_length | https://github.com/networkx/networkx.git | Hide edges with a weight of None in A*. (#5945)
* Hide edges with a weight of None in A*.
This matches the Dijkstra's weight interface.
* Update Dijkstra's and A* docs for weights of None.
* Add tests for A* with weight of None.
* Add another test for A* with a weight function.
* Document that None indi... | 16 | 0 | 42,410 | 8 | |
1 | 15 | def test_status_error_msg_format(ray_start_stop):
config_file_name = os.path.join(
os.path.dirname(__file__), "test_config_files", "deployment_fail.yaml"
)
subprocess.check_output(["serve", "deploy", config_file_name])
status_response = subprocess.check_output(
["serve", "status"... | python/ray/serve/tests/test_cli.py | 123 | ray | {
"docstring": "Deploys a faulty config file and checks its status.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | 25 | Python | 22 | b856daebbdc923a216ce412be477c61e6cc5707e | test_cli.py | 125,445 | 12 | 79 | test_status_error_msg_format | https://github.com/ray-project/ray.git | [Serve] Fix Formatting of Error Messages printed in `serve status` (#26578) | 63 | 0 | 27,873 | 11 | |
21 | 27 | def _get_style_dict(self, gc, rgbFace):
attrib = {}
forced_alpha = gc.get_forced_alpha()
if gc.get_hatch() is not None:
attrib['fill'] = "url(#%s)" % self._get_hatch(gc, rgbFace)
if (rgbFace is not None and len(rgbFace) == 4 and rgbFace[3] != 1.0
... | lib/matplotlib/backends/backend_svg.py | 558 | matplotlib | {
"docstring": "Generate a style string from the GraphicsContext and rgbFace.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | 145 | Python | 76 | ec410abbb3a721e31f3aaa61e9e4f941467e35e1 | backend_svg.py | 108,151 | 37 | 342 | _get_style_dict | https://github.com/matplotlib/matplotlib.git | Deprecate functions in backends | 580 | 0 | 23,079 | 17 | |
5 | 14 | def _get_node_attribute_at_index(self, node_index, attr, attr_name):
if not self._inbound_nodes:
raise RuntimeError(
f"The layer {self.name} has never been called "
f"and thus has no defined {attr_name}."
)
if not len(self._inbound_nodes) ... | keras/engine/base_layer.py | 165 | keras | {
"docstring": "Private utility to retrieves an attribute (e.g. inputs) from a node.\n\n This is used to implement the methods:\n - get_input_shape_at\n - get_output_shape_at\n - get_input_at\n etc...\n\n Args:\n node_index: Integer index of the nod... | 66 | Python | 54 | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | base_layer.py | 270,767 | 17 | 84 | _get_node_attribute_at_index | https://github.com/keras-team/keras.git | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | 249 | 0 | 80,572 | 15 | |
4 | 21 | def _is_current(self, file_path, zip_path):
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size != size or stat.st_mtime != timestamp:
return False
... | .venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py | 152 | transferlearning | {
"docstring": "\n Return True if the file_path is current for this zip_path\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 10,
"vocab_size": 10
} | 47 | Python | 36 | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | __init__.py | 63,102 | 11 | 92 | _is_current | https://github.com/jindongwang/transferlearning.git | upd; format | 143 | 0 | 13,147 | 11 | |
4 | 9 | def iter_mapped_dependants(self) -> Iterator["MappedOperator"]:
return (
downstream
for downstream in self._iter_all_mapped_downstreams()
if any(p.node_id == self.node_id for p in downstream.iter_mapped_dependencies())
)
| airflow/models/taskmixin.py | 69 | airflow | {
"docstring": "Return mapped nodes that depend on the current task the expansion.\n\n For now, this walks the entire DAG to find mapped nodes that has this\n current task as an upstream. We cannot use ``downstream_list`` since it\n only contains operators, not task groups. In the future, we shou... | 20 | Python | 17 | 197cff3194e855b9207c3c0da8ae093a0d5dda55 | taskmixin.py | 47,757 | 13 | 42 | iter_mapped_dependants | https://github.com/apache/airflow.git | Ensure TaskMap only checks "relevant" dependencies (#23053)
When looking for "mapped dependants" of a task, we only want a task if
it not only is a direct downstream of the task, but also it actually
"uses" the task's pushed XCom for task mapping. So we need to peek into
the mapped downstream task's expansion kwarg... | 74 | 0 | 9,246 | 12 | |
1 | 6 | def device_traits() -> list[str]:
return ["sdm.devices.traits.DoorbellChime"]
@pytest.fixture(autouse=True) | tests/components/nest/test_events.py | 42 | @pytest.fixture(autouse=True) | core | {
"docstring": "Fixture for the present traits of the device under test.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 9
} | 7 | Python | 7 | 7a5fa8eb58f49282e73f454826472ba54cd37a30 | test_events.py | 313,498 | 3 | 14 | device_traits | https://github.com/home-assistant/core.git | Update more nest tests to use common fixtures (#73303)
Update nest tests to use fixtures | 12 | 1 | 112,116 | 7 |
4 | 7 | def __getitem__(self, key):
if key in self._layout_map:
return self._layout_map[key]
for k in self._layout_map:
if re.match(k, key):
return self._layout_map[k]
return None
| keras/dtensor/layout_map.py | 74 | keras | {
"docstring": "Retrieve the corresponding layout by the string key.\n\n When there isn't an exact match, all the existing keys in the layout map\n will be treated as a regex and map against the input key again. The first\n match will be returned, based on the key insertion order. Return None if\... | 20 | Python | 14 | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | layout_map.py | 270,587 | 7 | 48 | __getitem__ | https://github.com/keras-team/keras.git | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | 85 | 0 | 80,488 | 11 | |
4 | 22 | def load_diabetes(*, return_X_y=False, as_frame=False, scaled=True):
data_filename = "diabetes_data_raw.csv.gz"
target_filename = "diabetes_target.csv.gz"
data = load_gzip_compressed_csv_data(data_filename)
target = load_gzip_compressed_csv_data(target_filename)
if scaled:
data = scale... | sklearn/datasets/_base.py | 260 | scikit-learn | {
"docstring": "Load and return the diabetes dataset (regression).\n\n ============== ==================\n Samples total 442\n Dimensionality 10\n Features real, -.2 < x < .2\n Targets integer 25 - 346\n ============== ==================\n\n .. note::\n The meaning... | 80 | Python | 60 | a793c1f0ad7dd63b2a896d2e84087089a11e7fca | _base.py | 258,643 | 30 | 164 | load_diabetes | https://github.com/scikit-learn/scikit-learn.git | DOC Ensures that sklearn.datasets._base.load_breast_cancer passes numpydoc validation (#22346)
Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com>
Co-authored-by: Arturo Amor <86408019+ArturoAmorQ@users.noreply.github.com> | 234 | 0 | 75,347 | 11 | |
1 | 5 | def require_pyctcdecode(test_case):
return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)
| src/transformers/testing_utils.py | 37 | transformers | {
"docstring": "\n Decorator marking a test that requires pyctcdecode\n ",
"language": "en",
"n_whitespaces": 14,
"n_words": 7,
"vocab_size": 7
} | 7 | Python | 7 | 57e6464ac9a31156f1c93e59107323e6ec01309e | testing_utils.py | 37,487 | 2 | 20 | require_pyctcdecode | https://github.com/huggingface/transformers.git | Update all require decorators to use skipUnless when possible (#16999) | 13 | 0 | 6,794 | 10 | |
1 | 8 | def test_get_entities_changed(self) -> None:
cache = StreamChangeCache("#test", 1)
cache.entity_has_changed("user@foo.com", 2)
cache.entity_has_changed("bar@baz.net", 3)
cache.entity_has_changed("user@elsewhere.org", 4)
# Query all the entries, but mid-way through the ... | tests/util/test_stream_change_cache.py | 281 | synapse | {
"docstring": "\n StreamChangeCache.get_entities_changed will return the entities in the\n given list that have changed since the provided stream ID. If the\n stream position is earlier than the earliest known position, it will\n return all of the entities queried for.\n ",
"lan... | 150 | Python | 77 | acea4d7a2ff61b5beda420b54a8451088060a8cd | test_stream_change_cache.py | 250,017 | 45 | 158 | test_get_entities_changed | https://github.com/matrix-org/synapse.git | Add missing types to tests.util. (#14597)
Removes files under tests.util from the ignored by list, then
fully types all tests/util/*.py files. | 682 | 0 | 73,231 | 11 | |
3 | 7 | def is_hash_allowed(self, hashes):
# type: (Optional[Hashes]) -> bool
if hashes is None or not self.has_hash:
return False
# Assert non-None so mypy knows self.hash_name and self.hash are str.
assert self.hash_name is not None
assert self.hash is not None
... | .venv/lib/python3.8/site-packages/pip/_internal/models/link.py | 79 | transferlearning | {
"docstring": "\n Return True if the link has a hash and it is allowed.\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 12,
"vocab_size": 12
} | 52 | Python | 40 | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | link.py | 60,852 | 6 | 49 | is_hash_allowed | https://github.com/jindongwang/transferlearning.git | upd; format | 111 | 0 | 12,300 | 9 | |
1 | 3 | def get_paths(self, path): # type: (str) -> t.List[str]
return []
| test/lib/ansible_test/_internal/provider/source/unsupported.py | 21 | ansible | {
"docstring": "Return the list of available content paths under the given path.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 10
} | 10 | Python | 10 | de5f60e374524de13fe079b52282cd7a9eeabd5f | unsupported.py | 266,497 | 2 | 11 | get_paths | https://github.com/ansible/ansible.git | ansible-test - Improve help for unsupported cwd. (#76866)
* ansible-test - Improve help for unsupported cwd.
* The `--help` option is now available when an unsupported cwd is in use.
* The `--help` output now shows the same instructions about cwd as would be shown in error messages if the cwd is unsupported.
* Ad... | 25 | 0 | 78,438 | 6 | |
2 | 9 | def _normalized_keys(self, section, items):
# type: (str, Iterable[Tuple[str, Any]]) -> Dict[str, Any]
normalized = {}
for name, val in items:
key = section + "." + _normalize_name(name)
normalized[key] = val
return normalized
| .venv/lib/python3.8/site-packages/pip/_internal/configuration.py | 65 | transferlearning | {
"docstring": "Normalizes items to construct a dictionary with normalized keys.\n\n This routine is where the names become keys and are made the same\n regardless of source - configuration files or environment.\n ",
"language": "en",
"n_whitespaces": 51,
"n_words": 30,
"vocab_size": 29
} | 32 | Python | 27 | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | configuration.py | 60,675 | 6 | 39 | _normalized_keys | https://github.com/jindongwang/transferlearning.git | upd; format | 89 | 0 | 12,240 | 11 | |
1 | 38 | def check_figures_equal(*, extensions=("png", "pdf", "svg"), tol=0):
| lib/matplotlib/testing/decorators.py | 130 |
"""
Decorator for test cases that generate and compare two figures.
The decorated function must take two keyword arguments, *fig_test*
and *fig_ref*, and draw the test and reference images on them.
After the function returns, the figures are saved and compared.
This decorator should be prefer... | matplotlib | {
"docstring": "\n Decorator for test cases that generate and compare two figures.\n\n The decorated function must take two keyword arguments, *fig_test*\n and *fig_ref*, and draw the test and reference images on them.\n After the function returns, the figures are saved and compared.\n\n This decorator... | 6 | Python | 6 | ca78e3d0eba4d948835c5499e0ff4084b998f28e | decorators.py | 110,184 | 39 | 45 | check_figures_equal | https://github.com/matplotlib/matplotlib.git | [DOC] swapped params in fig_compare_error msg | 9 | 8 | 23,964 | 9 |
2 | 11 | def lowest_common_ancestor(G, node1, node2, default=None):
ans = list(all_pairs_lowest_common_ancestor(G, pairs=[(node1, node2)]))
if ans:
assert len(ans) == 1
return ans[0][1]
else:
return default
@not_implemented_for("undirected")
@not_implemented_for("multigraph") | networkx/algorithms/lowest_common_ancestors.py | 105 | @not_implemented_for("undirected")
@not_implemented_for("multigraph") | networkx | {
"docstring": "Compute the lowest common ancestor of the given pair of nodes.\n\n Parameters\n ----------\n G : NetworkX directed graph\n\n node1, node2 : nodes in the graph.\n\n default : object\n Returned if no common ancestor between `node1` and `node2`\n\n Returns\n -------\n The l... | 23 | Python | 22 | abaa68779ccb4cce8d1a5ecade622ab96d01edeb | lowest_common_ancestors.py | 176,975 | 7 | 55 | lowest_common_ancestor | https://github.com/networkx/networkx.git | Add examples to lowest common ancestors algorithms (#5531)
* Add examples to lowest common ancestors documentation
* Fix output style of examples
* Fix output style of example
* Update pre-commit
* Update networkx/algorithms/lowest_common_ancestors.py
Co-authored-by: Ross Barnowski <rossbar@berkeley.edu... | 54 | 1 | 42,203 | 13 |
7 | 23 | def _get_per_session_stats(self):
if self._per_session_stats is None:
logger.debug("Collating per session stats")
compiled = []
for session_id, ts_data in self._time_stats.items():
logger.debug("Compiling session ID: %s", session_id)
i... | lib/gui/analysis/stats.py | 425 | faceswap | {
"docstring": " Populate the attribute :attr:`_per_session_stats` with a sorted list by session ID\n of each ID in the training/loaded session. Stats contain the session ID, start, end and\n elapsed times, the training rate, batch size and number of iterations for each session.\n\n If a training... | 107 | Python | 79 | c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf | stats.py | 100,313 | 26 | 249 | _get_per_session_stats | https://github.com/deepfakes/faceswap.git | Update code to support Tensorflow versions up to 2.8 (#1213)
* Update maximum tf version in setup + requirements
* - bump max version of tf version in launcher
- standardise tf version check
* update keras get_custom_objects for tf>2.6
* bugfix: force black text in GUI file dialogs (linux)
* dssim loss -... | 447 | 0 | 19,810 | 18 | |
1 | 4 | def load_info():
text =
console.print(text)
| gamestonk_terminal/portfolio/portfolio_view.py | 40 | OpenBBTerminal | {
"docstring": "Prints instructions to load a CSV\n\n Returns\n ----------\n text : str\n Information on how to load a csv\n \nIn order to load a CSV do the following:\n\n1. Add headers to the first row, below is data for each column:\\n\n\\t1. Identifier for the asset (such as a stock ticker)\n\\t... | 5 | Python | 5 | 82747072c511beb1b2672846ae2ee4aec53eb562 | portfolio_view.py | 281,521 | 16 | 14 | load_info | https://github.com/OpenBB-finance/OpenBBTerminal.git | Terminal Wide Rich (#1161)
* My idea for how we handle Rich moving forward
* remove independent consoles
* FIxed pylint issues
* add a few vars
* Switched print to console
* More transitions
* Changed more prints
* Replaced all prints
* Fixing tabulate
* Finished replace tabulate
* Finish... | 15 | 0 | 83,824 | 7 | |
def MultivariateNormal(name, mu, sigma):
    r"""
    Creates a continuous random variable with Multivariate Normal
    Distribution.

    The density of the multivariate normal distribution can be found at [1].

    Parameters
    ==========

    name : str
        Name of the random variable.
    mu : List representing the mean or the mean vector
    sigma : Positive semidefinite square matrix

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Multivariate_normal_distribution
    """
    # The docstring had been detached, leaving a bare `r` expression that
    # raised NameError at call time; it is reattached as a raw docstring.
    return multivariate_rv(MultivariateNormalDistribution, name, mu, sigma)
#-------------------------------------------------------------------------------
# Multivariate Laplace distribution --------------------------------------------
| sympy/stats/joint_rv_types.py | 33 | sympy | {
"docstring": "\n Creates a continuous random variable with Multivariate Normal\n Distribution.\n\n The density of the multivariate normal distribution can be found at [1].\n\n Parameters\n ==========\n\n mu : List representing the mean or the mean vector\n sigma : Positive semidefinite square m... | 16 | Python | 15 | 9ad8ab9fe58051cf11626ba6654852fcfec60147 | joint_rv_types.py | 196,702 | 58 | 22 | MultivariateNormal | https://github.com/sympy/sympy.git | Documentation cleanup 5 | 19 | 0 | 48,120 | 7 | |
def cursor_text_end(self) -> bool:
    """Move the cursor to the end of the text.

    Returns:
        bool: True if the cursor moved. False otherwise.
    """
    end_index = len(self.content)
    if self.cursor_index == end_index:
        # Already at the end: nothing to do.
        return False
    self.cursor_index = end_index
    return True
| src/textual/_text_backend.py | 54 | textual | {
"docstring": "Move the cursor to the end of the text\n\n Returns:\n bool: True if the cursor moved. False otherwise.\n ",
"language": "en",
"n_whitespaces": 43,
"n_words": 18,
"vocab_size": 14
} | 18 | Python | 14 | dd18ecbdbe744812509630935a877424202f2a70 | _text_backend.py | 183,434 | 11 | 32 | cursor_text_end | https://github.com/Textualize/textual.git | Docstring improvements | 64 | 0 | 44,186 | 9 | |
def has_perms(self, perm_list, obj=None):  # noqa: D205, D212, D400, D415
    """
    Return True if the user has each of the specified permissions. If
    object is passed, check if the user has all required perms for it.
    """
    for perm in perm_list:
        if not self.has_perm(perm, obj):
            # Short-circuit on the first missing permission.
            return False
    return True
| saleor/permission/models.py | 44 | saleor | {
"docstring": "\n Return True if the user has each of the specified permissions. If\n object is passed, check if the user has all required perms for it.\n ",
"language": "en",
"n_whitespaces": 47,
"n_words": 25,
"vocab_size": 20
} | 17 | Python | 17 | d5ef58653803075849a6a13177e7a6e604aa2f60 | models.py | 30,034 | 2 | 28 | has_perms | https://github.com/saleor/saleor.git | Move PermissionsMixin from django auth | 32 | 0 | 5,285 | 9 | |
7 | 25 | def get(self):
response = {
'learn': False,
'predict': False,
'analyse': False
}
if os.name != 'posix':
return response
for process_type in response:
processes_dir = Path(tempfile.gettempdir()).joinpath(f'mindsdb/proc... | mindsdb/api/http/namespaces/util.py | 230 | @ns_conf.route('/telemetry') | mindsdb | {
"docstring": " Checks server use native for learn or analyse.\n Will return right result only on Linux.\n ",
"language": "en",
"n_whitespaces": 34,
"n_words": 15,
"vocab_size": 15
} | 55 | Python | 42 | 44d7ef0e08e5144870ad2831ce6e221f9044c47c | util.py | 114,397 | 22 | 125 | get | https://github.com/mindsdb/mindsdb.git | 'files' route | 316 | 1 | 25,182 | 16 |
def add_suffix(self, suffix):  # noqa: PR01, RT01, D200
    """
    Suffix labels with string `suffix`.
    """
    suffixed_qc = self._query_compiler.add_suffix(suffix)
    return self.__constructor__(query_compiler=suffixed_qc)
| modin/pandas/dataframe.py | 41 | modin | {
"docstring": "\n Suffix labels with string `suffix`.\n ",
"language": "en",
"n_whitespaces": 20,
"n_words": 5,
"vocab_size": 5
} | 12 | Python | 12 | b541b6c18e6fb4515e998b9b4f88528490cf69c6 | dataframe.py | 155,480 | 4 | 24 | add_suffix | https://github.com/modin-project/modin.git | REFACTOR-#3948: Use `__constructor__` in `DataFrame` and `Series` classes (#5485)
Signed-off-by: Anatoly Myachev <anatoly.myachev@intel.com> | 45 | 0 | 36,395 | 11 | |
5 | 28 | def _broadcasting_select_mhlo(which, x, y):
which_type, x_type, y_type = (
ir.RankedTensorType(v.type) for v in (which, x, y))
out_shape = list(lax_internal.broadcast_shapes(
tuple(which_type.shape), tuple(x_type.shape), tuple(y_type.shape)))
bcast_dims = lambda shape: mlir.dense_int_elements(
... | jax/_src/lax/linalg.py | 316 | jax | {
"docstring": "Wrapper around XLA `Select` that broadcasts its arguments.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | 68 | Python | 50 | bc658e74567ffa941b31f4e89463dc713d2ecbf4 | linalg.py | 120,043 | 20 | 211 | _broadcasting_select_mhlo | https://github.com/google/jax.git | [MHLO] Add direct MHLO lowerings for most linear algebra kernels.
PiperOrigin-RevId: 439927594 | 140 | 0 | 26,747 | 14 | |
def preprocess_input(x, data_format=None):
    """A placeholder method kept for backward compatibility.

    Preprocessing is handled inside the model itself, so this function
    returns its input unchanged; `data_format` is accepted but ignored.
    """
    return x
@keras_export("keras.applications.regnet.decode_predictions") | keras/applications/regnet.py | 32 | @keras_export("keras.applications.regnet.decode_predictions") | keras | {
"docstring": "A placeholder method for backward compatibility.\n\n The preprocessing logic has been included in the regnet model\n implementation. Users are no longer required to call this method to\n normalize the input data. This method does nothing and only kept as a\n placeholder to align the API su... | 6 | Python | 6 | 3613c3defc39c236fb1592c4f7ba1a9cc887343a | regnet.py | 278,618 | 2 | 12 | preprocess_input | https://github.com/keras-team/keras.git | Remove pylint comments.
PiperOrigin-RevId: 452353044 | 11 | 1 | 82,631 | 7 |
4 | 10 | def setDebugActions(self, startAction, successAction, exceptionAction):
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debu... | .venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py | 54 | transferlearning | {
"docstring": "\n Enable display of debugging messages while doing pattern matching.\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 9,
"vocab_size": 9
} | 21 | Python | 18 | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | pyparsing.py | 63,461 | 6 | 36 | setDebugActions | https://github.com/jindongwang/transferlearning.git | upd; format | 105 | 0 | 13,314 | 8 | |
def preferred_batch_format(cls) -> BatchFormat:
    """Batch format hint for upstream producers to try yielding best block format.

    The preferred batch format to use if both `_predict_pandas` and
    `_predict_numpy` are implemented. Defaults to Pandas.

    Can be overridden by predictor classes depending on the framework type.
    """
    return BatchFormat.PANDAS
| python/ray/train/predictor.py | 22 | ray | {
"docstring": "Batch format hint for upstream producers to try yielding best block format.\n\n The preferred batch format to use if both `_predict_pandas` and\n `_predict_numpy` are implemented. Defaults to Pandas.\n\n Can be overriden by predictor classes depending on the framework type,\n ... | 6 | Python | 6 | 326d84f1149319809191e7887155df7f04f6f46a | predictor.py | 136,395 | 12 | 12 | preferred_batch_format | https://github.com/ray-project/ray.git | [AIR][Predictor] Enable numpy based predictor (#28917)
Co-authored-by: Clark Zinzow <clarkzinzow@gmail.com>
Co-authored-by: Amog Kamsetty <amogkam@users.noreply.github.com> | 20 | 0 | 30,906 | 6 | |
1 | 20 | def test_song_from_data_dump():
# Loads from str
song = Song.from_data_dump(
)
assert song.name == "Ropes"
assert song.artists == ["Dirty Palm", "Chandler Jewels"]
assert song.album_name == "Ropes"
assert song.album_artist == "Dirty Palm"
assert song.genres == ["gaming ed... | tests/types/test_song.py | 207 | spotify-downloader | {
"docstring": "\n Tests if Song.from_data_dump() works correctly.\n \n {\n \"name\": \"Ropes\",\n \"artists\": [\"Dirty Palm\", \"Chandler Jewels\"],\n \"album_name\": \"Ropes\",\n \"album_artist\": \"Dirty Palm\",\n \"genres\": [\"gaming edm\", \"m... | 84 | Python | 50 | fa2ad657482aca9dc628e6d7062b8badf2706bb6 | test_song.py | 30,144 | 47 | 119 | test_song_from_data_dump | https://github.com/spotDL/spotify-downloader.git | v4 init | 169 | 0 | 5,345 | 9 | |
8 | 21 | def _dedupe_indices_in_rule(self, rule):
index_rules = {k:v for k,v in rule.items() if isinstance(k, TensorIndex)}
other_rules = {k:v for k,v in rule.items() if k not in index_rules.keys()}
exclude = set(self.get_indices())
newrule = {}
newrule.update(index_rules)
... | sympy/tensor/tensor.py | 245 | sympy | {
"docstring": "\n rule: dict\n\n This applies self._dedupe_indices on all values of rule.\n ",
"language": "en",
"n_whitespaces": 32,
"n_words": 10,
"vocab_size": 10
} | 61 | Python | 39 | 1eee7b6ba5b4903ac889a73feab130572d232554 | tensor.py | 200,567 | 16 | 153 | _dedupe_indices_in_rule | https://github.com/sympy/sympy.git | Add TensMul._dedupe_indices_in_rule
This applies self._dedupe_indices on all values of `rule`. | 209 | 0 | 49,702 | 14 | |
1 | 28 | def test_delete_media_never_accessed(self) -> None:
# upload and do not access
server_and_media_id = self._create_media()
self.pump(1.0)
# test that the file exists
media_id = server_and_media_id.split("/")[1]
local_path = self.filepaths.local_media_filepath(me... | tests/rest/admin/test_media.py | 236 | synapse | {
"docstring": "\n Tests that media deleted if it is older than `before_ts` and never accessed\n `last_access_ts` is `NULL` and `created_ts` < `before_ts`\n ",
"language": "en",
"n_whitespaces": 42,
"n_words": 20,
"vocab_size": 17
} | 56 | Python | 48 | c97042f7eef3748e17c90e48a4122389a89c4735 | test_media.py | 249,114 | 23 | 146 | test_delete_media_never_accessed | https://github.com/matrix-org/synapse.git | Use literals in place of `HTTPStatus` constants in tests (#13469) | 230 | 0 | 72,621 | 11 | |
7 | 11 | def parseline(self, line):
line = line.strip()
if not line:
return None, None, line
elif line[0] == '?':
line = 'help ' + line[1:]
elif line[0] == '!':
if hasattr(self, 'do_shell'):
line = 'shell ' + line[1:]
else:
... | python3.10.4/Lib/cmd.py | 211 | XX-Net | {
"docstring": "Parse the line into a command name and a string containing\n the arguments. Returns a tuple containing (command, args, line).\n 'command' and 'args' may be None if the line couldn't be parsed.\n ",
"language": "en",
"n_whitespaces": 54,
"n_words": 32,
"vocab_size": 24
} | 66 | Python | 41 | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | cmd.py | 221,346 | 15 | 129 | parseline | https://github.com/XX-net/XX-Net.git | add python 3.10.4 for windows | 203 | 0 | 56,361 | 14 | |
11 | 39 | def filter_queryset(self, request, queryset, view):
fields = set(view.get_available_fields(queryset.model, db_fields_only=True))
# Locale is a database field, but we provide a separate filter for it
if "locale" in fields:
fields.remove("locale")
for field_name, val... | wagtail/api/v2/filters.py | 359 | wagtail | {
"docstring": "\n This performs field level filtering on the result set\n Eg: ?title=James Joyce\n ",
"language": "en",
"n_whitespaces": 34,
"n_words": 12,
"vocab_size": 12
} | 162 | Python | 108 | d10f15e55806c6944827d801cd9c2d53f5da4186 | filters.py | 72,652 | 31 | 220 | filter_queryset | https://github.com/wagtail/wagtail.git | Reformat with black | 758 | 0 | 15,909 | 21 | |
def _generate_examples(self, **kwargs):
    """Default function generating examples for each `SplitGenerator`.

    Preprocesses the examples from the raw data into the preprocessed
    dataset files; called once for each `SplitGenerator` defined in
    `_split_generators`.

    Raises:
        NotImplementedError: Always; subclasses must override this method.
    """
    raise NotImplementedError()
| src/datasets/builder.py | 24 | datasets | {
"docstring": "Default function generating examples for each `SplitGenerator`.\n\n This function preprocess the examples from the raw data to the preprocessed\n dataset files.\n This function is called once for each `SplitGenerator` defined in\n `_split_generators`. The examples yielded h... | 5 | Python | 5 | 5669b8c8d75b8c3106abd23f21d902d1f020e25d | builder.py | 105,068 | 2 | 13 | _generate_examples | https://github.com/huggingface/datasets.git | Add missing kwargs to docstrings (#4446) | 19 | 0 | 22,061 | 7 | |
2 | 8 | def get_delayed_update_fields(self):
self.extra_update_fields['emitted_events'] = self.event_ct
if 'got an unexpected keyword argument' in self.extra_update_fields.get('result_traceback', ''):
self.delay_update(result_traceback=ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE)
return sel... | awx/main/tasks/callback.py | 76 | awx | {
"docstring": "Return finalized dict of all fields that should be saved along with the job status change",
"language": "en",
"n_whitespaces": 15,
"n_words": 16,
"vocab_size": 16
} | 17 | Python | 17 | 452744b67e02823879e722fe574984a2d760ed60 | callback.py | 81,164 | 5 | 42 | get_delayed_update_fields | https://github.com/ansible/awx.git | Delay update of artifacts and error fields until final job save (#11832)
* Delay update of artifacts until final job save
Save tracebacks from receptor module to callback object
Move receptor traceback check up to be more logical
Use new mock_me fixture to avoid DB call with me method
Update the special ru... | 56 | 0 | 17,165 | 10 | |
3 | 33 | def forward_train(self, feat, out_enc, targets, valid_ratios):
tgt_embedding = self.embedding(targets)
n, c_enc, h, w = out_enc.shape
assert c_enc == self.dim_model
_, c_feat, _, _ = feat.shape
assert c_feat == self.dim_input
_, len_q, c_q = tgt_embedding.shape... | ppocr/modeling/heads/rec_robustscanner_head.py | 300 | PaddleOCR | {
"docstring": "\n Args:\n feat (Tensor): Tensor of shape :math:`(N, D_i, H, W)`.\n out_enc (Tensor): Encoder output of shape\n :math:`(N, D_m, H, W)`.\n targets (Tensor): a tensor of shape :math:`(N, T)`. Each element is the index of a\n character... | 163 | Python | 91 | 63484257442362057ab4ea4acd769d52d42da9f1 | rec_robustscanner_head.py | 23,815 | 22 | 200 | forward_train | https://github.com/PaddlePaddle/PaddleOCR.git | add robustscanner | 443 | 0 | 4,647 | 13 | |
6 | 15 | def normalize_path_patterns(patterns):
patterns = [os.path.normcase(p) for p in patterns]
dir_suffixes = {"%s*" % path_sep for path_sep in {"/", os.sep}}
norm_patterns = []
for pattern in patterns:
for dir_suffix in dir_suffixes:
if pattern.endswith(dir_suffix):
... | django/core/management/utils.py | 141 | django | {
"docstring": "Normalize an iterable of glob style patterns based on OS.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | 39 | Python | 29 | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | utils.py | 204,719 | 12 | 86 | normalize_path_patterns | https://github.com/django/django.git | Refs #33476 -- Reformatted code with Black. | 123 | 0 | 50,851 | 18 | |
def details(self):
    """
    :return: details of this exception
    """
    if self._details:
        return self._details
    # Fall back to the details of the original (wrapped) exception.
    return self.og_exception.details()
| jina/excepts.py | 37 | jina | {
"docstring": "\n :return: details of this exception\n ",
"language": "en",
"n_whitespaces": 20,
"n_words": 5,
"vocab_size": 5
} | 8 | Python | 7 | 072a47a4fa97aca68203882e1ef809681a523097 | excepts.py | 12,341 | 2 | 22 | details | https://github.com/jina-ai/jina.git | feat: better error messages when gateway can't connect to other deployment (#4677) | 22 | 0 | 2,262 | 9 | |
def bind(self, *args, **kwargs) -> DeploymentNode:
    """Bind the provided arguments and return a DeploymentNode.

    The returned bound deployment can be deployed or bound to other
    deployments to create a multi-deployment application.

    Raises:
        NotImplementedError: This interface is not yet implemented.
    """
    raise NotImplementedError()
| python/ray/serve/api.py | 31 | ray | {
"docstring": "Bind the provided arguments and return a DeploymentNode.\n\n The returned bound deployment can be deployed or bound to other\n deployments to create a multi-deployment application.\n ",
"language": "en",
"n_whitespaces": 46,
"n_words": 25,
"vocab_size": 22
} | 8 | Python | 8 | f646d3fc312f63a6cf3e59a00ae1b3d6ab40393a | api.py | 146,461 | 7 | 18 | bind | https://github.com/ray-project/ray.git | [serve] Add unimplemented interfaces for Deployment DAG APIs (#23125)
Adds the following interfaces (without implementation, for discussion / approval):
- `serve.Application`
- `serve.DeploymentNode`
- `serve.DeploymentMethodNode`, `serve.DAGHandle`, and `serve.drivers.PipelineDriver`
- `serve.run` & `serve.build`... | 22 | 0 | 33,689 | 7 | |
1 | 3 | def _inflate_g(g, n):
# TODO should this be a method of meijerg?
# See: [L, page 150, equation (5)] | sympy/integrals/meijerint.py | 17 | sympy | {
"docstring": " Return C, h such that h is a G function of argument z**n and\n g = C*h. ",
"language": "en",
"n_whitespaces": 25,
"n_words": 17,
"vocab_size": 16
} | 19 | Python | 18 | e94a7b45d7b033ccbd57395dca28b654f875c54c | meijerint.py | 198,410 | 8 | 118 | _inflate_g | https://github.com/sympy/sympy.git | Improve loop performance | 28 | 0 | 48,919 | 6 | |
56 | 66 | def paramToDict(place, parameters=None):
testableParameters = OrderedDict()
if place in conf.parameters and not parameters:
parameters = conf.parameters[place]
parameters = re.sub(r"&(\w{1,4});", r"%s\g<1>%s" % (PARAMETER_AMP_MARKER, PARAMETER_SEMICOLON_MARKER), parameters)
if place == P... | lib/core/common.py | 868 | def paramToDict(place, parameters=None):
"""
Split the parameters into names and values, check if these parameters
are within the testable parameters and return in a dictionary.
"""
testableParameters = OrderedDict()
if place in conf.parameters and not parameters:
parameters = conf.par... | sqlmap | {
"docstring": "\n Split the parameters into names and values, check if these parameters\n are within the testable parameters and return in a dictionary.\n ",
"language": "en",
"n_whitespaces": 31,
"n_words": 21,
"vocab_size": 17
} | 325 | Python | 170 | df4293473d2fb6e887e31522cab5aff95e201581 | common.py | 123,469 | 110 | 979 | paramToDict | https://github.com/sqlmapproject/sqlmap.git | Fixing DeprecationWarning (logger.warn) | 1,252 | 1 | 27,381 | 15 |
19 | 29 | def getcolor(self, color, image=None):
if self.rawmode:
raise ValueError("palette contains raw palette data")
if isinstance(color, tuple):
if self.mode == "RGB" and len(color) == 4:
if color[3] == 255:
color = color[:3]
... | src/PIL/ImagePalette.py | 500 | Pillow | {
"docstring": "Given an rgb tuple, allocate palette entry.\n\n .. warning:: This method is experimental.\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 13,
"vocab_size": 13
} | 178 | Python | 102 | f9a3178bb34e6b28bc46d42ef88f5069ebabde32 | ImagePalette.py | 243,502 | 49 | 299 | getcolor | https://github.com/python-pillow/Pillow.git | Fix #6652: Handle translucent color used in RGB ImagePallete | 1,055 | 0 | 70,038 | 22 | |
7 | 38 | def draw_masks(ax, img, masks, color=None, with_edge=True, alpha=0.8):
taken_colors = set([0, 0, 0])
if color is None:
random_colors = np.random.randint(0, 255, (masks.size(0), 3))
color = [tuple(c) for c in random_colors]
color = np.array(color, dtype=np.uint8)
polygons = []
... | mmdet/core/visualization/image.py | 325 | mmdetection | {
"docstring": "Draw masks on the image and their edges on the axes.\n\n Args:\n ax (matplotlib.Axes): The input axes.\n img (ndarray): The image with the shape of (3, h, w).\n masks (ndarray): The masks with the shape of (n, h, w).\n color (ndarray): The colors for each masks with the ... | 91 | Python | 66 | 301d4a2d4cfe1cdb62608e2892924be3e67e3098 | image.py | 243,968 | 21 | 217 | draw_masks | https://github.com/open-mmlab/mmdetection.git | [Feature] Support visualization for Panoptic Segmentation (#7041)
* First commit of v2
* split the functions
* Support to show panoptic result
* temp
* Support to show gt
* support show gt
* fix lint
* Support to browse datasets
* Fix unit tests
* Fix findContours
* fix comments
* Fix ... | 218 | 0 | 70,170 | 13 | |
9 | 19 | def convert_to_experiment_list(experiments):
exp_list = experiments
# Transform list if necessary
if experiments is None:
exp_list = []
elif isinstance(experiments, Experiment):
exp_list = [experiments]
elif type(experiments) is dict:
exp_list = [
Experiment... | python/ray/tune/experiment.py | 188 | ray | {
"docstring": "Produces a list of Experiment objects.\n\n Converts input from dict, single experiment, or list of\n experiments to list of experiments. If input is None,\n will return an empty list.\n\n Arguments:\n experiments (Experiment | list | dict): Experiments to run.\n\n Returns:\n ... | 79 | Python | 59 | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | experiment.py | 132,194 | 19 | 112 | convert_to_experiment_list | https://github.com/ray-project/ray.git | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | 214 | 0 | 29,694 | 13 | |
1 | 23 | def test_approx_iou_assigner_with_empty_boxes_and_gt(self):
assigner = ApproxMaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.empty((0, 4))
gt_bboxes = torch.empty((0, 4))
gt_labels = torch.LongTensor([])
pred_instances ... | tests/test_core/test_bbox/test_assigners/test_approx_max_iou_assigner.py | 178 | mmdetection | {
"docstring": "Test corner case where an network might predict no boxes and no\n gt.",
"language": "en",
"n_whitespaces": 19,
"n_words": 13,
"vocab_size": 12
} | 45 | Python | 29 | bb7239ff635c4d9afd9c37a6e432251029aafb51 | test_approx_max_iou_assigner.py | 245,072 | 16 | 116 | test_approx_iou_assigner_with_empty_boxes_and_gt | https://github.com/open-mmlab/mmdetection.git | Refactor SABL RetinaNet | 165 | 0 | 70,644 | 10 | |
1 | 20 | def test_remove_not_installed():
name = "foo"
list_pkgs_mock = MagicMock(return_value={})
cmd_mock = MagicMock(
return_value={"pid": 12345, "retcode": 0, "stdout": "", "stderr": ""}
)
salt_mock = {
"cmd.run_all": cmd_mock,
"lowpkg.version_cmp": rpm.version_cmp,
"... | tests/pytests/unit/modules/test_yumpkg.py | 379 | salt | {
"docstring": "\n Tests that no exception raised on removing not installed package\n ",
"language": "en",
"n_whitespaces": 17,
"n_words": 10,
"vocab_size": 10
} | 78 | Python | 60 | 8ea5342cbde034383938e244cdb16a0bf8a777e8 | test_yumpkg.py | 216,118 | 28 | 212 | test_remove_not_installed | https://github.com/saltstack/salt.git | Fix exception in yumpkg.remove for not installed package | 280 | 0 | 54,409 | 14 | |
10 | 25 | def forward(self, tgt, memory, tgt_mask=None, memory_mask=None, cache=None):
r
tgt_mask = _convert_attention_mask(tgt_mask, tgt.dtype)
memory_mask = _convert_attention_mask(memory_mask, memory.dtype)
residual = tgt
if self.normalize_before:
tgt = self.norm1(tgt)
... | examples/model_interpretation/task/transformer.py | 437 | PaddleNLP | {
"docstring": "\n Applies a Transformer decoder layer on the input.\n\n Parameters:\n tgt (Tensor): The input of Transformer decoder layer. It is a tensor\n with shape `[batch_size, target_length, d_model]`. The data type\n should be float32 or float64.\n ... | 126 | Python | 47 | 93cae49c0c572b5c1ac972759140fbe924b0374d | transformer.py | 323,000 | 80 | 290 | forward | https://github.com/PaddlePaddle/PaddleNLP.git | Add NLP model interpretation (#1752)
* upload NLP interpretation
* fix problems and relocate project
* remove abandoned picture
* remove abandoned picture
* fix dead link in README
* fix dead link in README
* fix code style problems
* fix CR round 1
* remove .gitkeep files
* fix code style
... | 527 | 0 | 118,328 | 14 | |
def forgiving_int(value, default=_SENTINEL, base=10):
    """Try to convert value to an int, and raise if it fails."""
    converted = jinja2.filters.do_int(value, default=default, base=base)
    if converted is _SENTINEL:
        # do_int handed back the sentinel unchanged: the conversion failed
        # and the caller supplied no default of their own.
        raise_no_default("int", value)
    return converted
| homeassistant/helpers/template.py | 71 | core | {
"docstring": "Try to convert value to an int, and raise if it fails.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 11
} | 17 | Python | 15 | 4885331509eeffe50f42d76b234996467b06170f | template.py | 300,603 | 5 | 45 | forgiving_int | https://github.com/home-assistant/core.git | Fail template functions when no default specified (#71687) | 36 | 0 | 99,463 | 10 | |
36 | 55 | def norm(x, ord=None, axis=None, keepdims=False):
x = asarray(x)
if not issubclass(x.dtype.type, (inexact, object_)):
x = x.astype(float)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if ((ord is None) or
(ord... | numpy/linalg/linalg.py | 1,183 | numpy | {
"docstring": "\n Matrix or vector norm.\n\n This function is able to return one of eight different matrix norms,\n or one of an infinite number of vector norms (described below), depending\n on the value of the ``ord`` parameter.\n\n Parameters\n ----------\n x : array_like\n Input array... | 395 | Python | 178 | 0d13f9f747887b290108a909dd92c3cb47239921 | linalg.py | 160,203 | 87 | 746 | norm | https://github.com/numpy/numpy.git | BUG: Consistent promotion for norm for all values of ord (#17709)
Previously, numpy.linalg.norm would return values with the same floating-point
type as input arrays for most values of the ``ord`` parameter, but not all.
This PR fixes this so that the output dtype matches the input for all (valid) values
of ``ord``... | 1,233 | 0 | 38,571 | 21 | |
1 | 38 | def test_multiple_tags_return_distinct_objects_with_seperate_config_contexts(self):
site = Site.objects.first()
platform = Platform.objects.first()
tenant = Tenant.objects.first()
tag1, tag2 = list(Tag.objects.all())
tag_context_1 = ConfigContext.objects.create(
... | netbox/extras/tests/test_models.py | 358 | netbox | {
"docstring": "\n Tagged items use a generic relationship, which results in duplicate rows being returned when queried.\n This is combatted by by appending distinct() to the config context querysets. This test creates a config\n context assigned to two tags and ensures objects related by those s... | 57 | Python | 42 | d4a231585ac9a25d9739552d8c9e433dbf9398af | test_models.py | 266,200 | 33 | 223 | test_multiple_tags_return_distinct_objects_with_seperate_config_contexts | https://github.com/netbox-community/netbox.git | Clean up tests | 360 | 0 | 78,334 | 12 | |
1 | 11 | def test_recorder_pool(caplog):
engine = create_engine("sqlite://", poolclass=RecorderPool)
get_session = sessionmaker(bind=engine)
shutdown = False
connections = []
| tests/components/recorder/test_pool.py | 55 | core | {
"docstring": "Test RecorderPool gives the same connection in the creating thread.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 9
} | 15 | Python | 12 | bc862e97ed68cce8c437327651f85892787e755e | test_pool.py | 293,727 | 34 | 234 | test_recorder_pool | https://github.com/home-assistant/core.git | Use a dedicated executor pool for database operations (#68105)
Co-authored-by: Erik Montnemery <erik@montnemery.com>
Co-authored-by: Franck Nijhof <git@frenck.dev> | 30 | 0 | 92,782 | 9 | |
8 | 37 | def apply_infer_dtype(func, args, kwargs, funcname, suggest_dtype="dtype", nout=None):
from dask.array.utils import meta_from_array
# make sure that every arg is an evaluated array
args = [
np.ones_like(meta_from_array(x), shape=((1,) * x.ndim), dtype=x.dtype)
if is_arraylike(x)
... | dask/array/core.py | 341 | dask | {
"docstring": "\n Tries to infer output dtype of ``func`` for a small set of input arguments.\n\n Parameters\n ----------\n func: Callable\n Function for which output dtype is to be determined\n\n args: List of array like\n Arguments to the function, which would usually be used. Only att... | 119 | Python | 92 | cccb9d8d8e33a891396b1275c2448c352ef40c27 | core.py | 156,008 | 37 | 192 | apply_infer_dtype | https://github.com/dask/dask.git | absolufy-imports - No relative - PEP8 (#8796)
Conversation in https://github.com/dask/distributed/issues/5889 | 413 | 0 | 36,503 | 16 | |
def _cursor(self) -> Optional[Cursor]:
    """Return our Cursor.

    This will be None if we're not running in a ScriptThread - e.g., if
    we're running a "bare" script outside of Streamlit.
    """
    # An explicitly provided cursor takes precedence; otherwise fall back
    # to looking one up for the root container.
    if self._provided_cursor is not None:
        return self._provided_cursor
    return cursor.get_container_cursor(self._root_container)
| lib/streamlit/delta_generator.py | 55 | streamlit | {
"docstring": "Return our Cursor. This will be None if we're not running in a\n ScriptThread - e.g., if we're running a \"bare\" script outside of\n Streamlit.\n ",
"language": "en",
"n_whitespaces": 46,
"n_words": 25,
"vocab_size": 21
} | 13 | Python | 11 | 704eab3478cf69847825b23dabf15813a8ac9fa2 | delta_generator.py | 118,556 | 9 | 33 | _cursor | https://github.com/streamlit/streamlit.git | Rename and refactor `Report` machinery (#4141)
This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app". | 56 | 0 | 26,289 | 10 | |
3 | 10 | def _mysql_tables_where_indexes_already_present(conn):
to_check = [
('xcom', 'idx_xcom_task_instance'),
('task_reschedule', 'idx_task_reschedule_dag_run'),
('task_fail', 'idx_task_fail_task_instance'),
]
tables = set()
for tbl, idx in to_check:
if conn.execute(f"show... | airflow/migrations/versions/0111_2_3_3_add_indexes_for_cascade_deletes.py | 115 | airflow | {
"docstring": "\n If user downgraded and is upgrading again, we have to check for existing\n indexes on mysql because we can't (and don't) drop them as part of the\n downgrade.\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 28,
"vocab_size": 27
} | 32 | Python | 29 | 677c42227c08f705142f298ab88915f133cd94e5 | 0111_2_3_3_add_indexes_for_cascade_deletes.py | 43,184 | 11 | 61 | _mysql_tables_where_indexes_already_present | https://github.com/apache/airflow.git | Add indexes for CASCADE deletes for task_instance (#24488)
When we add foreign keys with ON DELETE CASCADE, and we delete rows in the foreign table, the database needs to join back to the referencing table. If there's no suitable index, then it can be slow to perform the deletes. | 89 | 0 | 7,856 | 13 | |
def fit(self, X, y=None):
    """Fit model on training data X.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training data.

    y : Ignored
        Not used, present here for API consistency by convention.

    Returns
    -------
    self : object
        The fitted transformer.
    """
    # All parameter validation happens inside fit_transform; its return
    # value is deliberately discarded here.
    self.fit_transform(X)
    return self
| sklearn/decomposition/_truncated_svd.py | 34 | scikit-learn | {
"docstring": "Fit model on training data X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training data.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n ------... | 14 | Python | 14 | 7da7ba603d42398c6e7cf89ea5336b8aabac7bae | _truncated_svd.py | 260,585 | 3 | 20 | fit | https://github.com/scikit-learn/scikit-learn.git | MNT TrucatedSVD uses _validate_parameters (#23987)
Co-authored-by: jeremiedbb <jeremiedbb@yahoo.fr> | 42 | 0 | 76,359 | 7 | |
def apply_channel_shift(x, intensity, channel_axis=0):
    """Performs a channel shift.

    Args:
        x: Input tensor. Must be 3D.
        intensity: Transformation intensity (added to every element).
        channel_axis: Index of axis for channels in the input tensor.

    Returns:
        Numpy image tensor.
    """
    # The clip bounds are the *global* min/max of the whole tensor, not
    # per-channel bounds, so shifting and clipping can be done in a single
    # vectorized pass. The original per-channel rollaxis/stack round-trip
    # produced identical values with extra copies.
    min_x, max_x = np.min(x), np.max(x)
    return np.clip(x + intensity, min_x, max_x)
... | keras/preprocessing/image.py | 144 | @keras_export('keras.preprocessing.image.random_channel_shift') | keras | {
"docstring": "Performs a channel shift.\n\n Args:\n x: Input tensor. Must be 3D.\n intensity: Transformation intensity.\n channel_axis: Index of axis for channels in the input tensor.\n\n Returns:\n Numpy image tensor.\n ",
"language": "en",
"n_whitespaces": 51,
"n_words": 28,
"vocab_... | 40 | Python | 29 | 373ad97c72ed1ac4b6898e85b2cfd7b016e4b469 | image.py | 268,943 | 8 | 89 | apply_channel_shift | https://github.com/keras-team/keras.git | Copy image utils from keras_preprocessing directly into core keras
This is not new code, we are just moving these utilities directly
into keras from keras-preprocessing.
For the library code, just fixed linting errors.
For the test code, had to do more major changes to port from pytest, but
hopefully any errors have ... | 51 | 1 | 79,775 | 10 |
1 | 19 | def _requeue_trial(self, trial):
self._scheduler_alg.on_trial_error(self, trial)
self.trial_executor.set_status(trial, Trial.PENDING)
# TODO(rliaw): Right now, this pushes the trial to the end of queue
# because restoration can be expensive. However, this is not
# ideal... | python/ray/tune/trial_runner.py | 148 | ray | {
"docstring": "Notification to TrialScheduler and requeue trial.\n\n This does not notify the SearchAlgorithm because the function\n evaluation is still in progress.\n\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 20,
"vocab_size": 19
} | 76 | Python | 60 | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | trial_runner.py | 132,862 | 10 | 86 | _requeue_trial | https://github.com/ray-project/ray.git | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | 204 | 0 | 29,841 | 14 | |
1 | 43 | def test_room_state_returned_when_knocking(self):
user_id = self.register_user("u1", "you the one")
user_token = self.login("u1", "you the one")
fake_knocking_user_id = "@user:other.example.com"
# Create a room with a room version that includes knocking
room_id = self.... | tests/federation/transport/test_knocking.py | 454 | synapse | {
"docstring": "\n Tests that specific, stripped state events from a room are returned after\n a remote homeserver successfully knocks on a local room.\n ",
"language": "en",
"n_whitespaces": 43,
"n_words": 21,
"vocab_size": 19
} | 259 | Python | 149 | c3db7a0b59d48b8872bc24096f9a2467ef35f703 | test_knocking.py | 246,318 | 50 | 276 | test_room_state_returned_when_knocking | https://github.com/matrix-org/synapse.git | Tests: replace mocked Authenticator with the real thing (#11913)
If we prepopulate the test homeserver with a key for a remote homeserver, we
can make federation requests to it without having to stub out the
authenticator. This has two advantages:
* means that what we are testing is closer to reality (ie, we now... | 834 | 0 | 71,151 | 12 | |
def should_overwrite(filepath, overwrite):
    """Returns whether the filepath should be overwritten."""
    if overwrite or not os.path.isfile(filepath):
        # Either overwriting was requested, or there is nothing to clobber.
        return True
    # The file exists and overwrite was not requested: defer to the user.
    return ask_to_proceed_with_overwrite(filepath)
| keras/saving/saving_utils.py | 48 | keras | {
"docstring": "Returns whether the filepath should be overwritten.",
"language": "en",
"n_whitespaces": 6,
"n_words": 7,
"vocab_size": 7
} | 21 | Python | 18 | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | saving_utils.py | 276,251 | 4 | 28 | should_overwrite | https://github.com/keras-team/keras.git | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | 40 | 0 | 81,607 | 9 | |
17 | 14 | def make_system(A, M, x0, b):
A_ = A
A = aslinearoperator(A)
if A.shape[0] != A.shape[1]:
raise ValueError(f'expected square matrix, but got shape={(A.shape,)}')
N = A.shape[0]
b = asanyarray(b)
if not (b.shape == (N,1) or b.shape == (N,)):
raise ValueError(f'shapes of A... | scipy/sparse/linalg/_isolve/utils.py | 194 | scipy | {
"docstring": "Make a linear system Ax=b\n\n Parameters\n ----------\n A : LinearOperator\n sparse or dense matrix (or any valid input to aslinearoperator)\n M : {LinearOperator, Nones}\n preconditioner\n sparse or dense matrix (or any valid input to aslinearoperator)\n x0 : {arra... | 62 | Python | 48 | 5628849933f1ba002f34b88b4d3af24f68008b39 | utils.py | 241,795 | 51 | 379 | make_system | https://github.com/scipy/scipy.git | MAINT: sparse.linalg: Remove unnecessary operations | 132 | 0 | 69,699 | 13 | |
3 | 11 | def news(xml_news_url, counter):
| Google_News.py | 23 | """Print select details from a html response containing xmla html response containing | Python | {
"docstring": "Print select details from a html response containing xml",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | 3 | Python | 3 | f0af0c43340763724f139fa68aa1e5a9ffe458b4 | Google_News.py | 22,452 | 16 | 95 | news | https://github.com/geekcomputers/Python.git | refactor: clean code
Signed-off-by: slowy07 <slowy.arfy@gmail.com> | 6 | 2 | 4,346 | 5 |
4 | 16 | def _estimator_with_converted_arrays(estimator, converter):
from sklearn.base import clone
new_estimator = clone(estimator)
for key, attribute in vars(estimator).items():
if hasattr(attribute, "__array_namespace__") or isinstance(
attribute, numpy.ndarray
):
att... | sklearn/utils/_array_api.py | 107 | scikit-learn | {
"docstring": "Create new estimator which converting all attributes that are arrays.\n\n Parameters\n ----------\n estimator : Estimator\n Estimator to convert\n\n converter : callable\n Callable that takes an array attribute and returns the converted array.\n\n Returns\n -------\n ... | 31 | Python | 27 | 2710a9e7eefd2088ce35fd2fb6651d5f97e5ef8b | _array_api.py | 261,018 | 10 | 67 | _estimator_with_converted_arrays | https://github.com/scikit-learn/scikit-learn.git | ENH Adds Array API support to LinearDiscriminantAnalysis (#22554)
Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org>
Co-authored-by: Julien Jerphanion <git@jjerphan.xyz> | 89 | 0 | 76,626 | 12 | |
def variables(self):
    """Returns variables of this Optimizer based on the order created."""
    weights = self._weights
    return weights
| keras/optimizers/optimizer_v2/optimizer_v2.py | 19 | keras | {
"docstring": "Returns variables of this Optimizer based on the order created.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | 4 | Python | 4 | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | optimizer_v2.py | 275,487 | 2 | 10 | variables | https://github.com/keras-team/keras.git | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | 18 | 0 | 81,391 | 6 | |
1 | 9 | def _get_link_annotation(gc, x, y, width, height):
link_annotation = {
'Type': Name('Annot'),
'Subtype': Name('Link'),
'Rect': (x, y, x + width, y + height),
'Border': [0, 0, 0],
'A': {
'S': Name('URI'),
'URI': gc.get_url(),
},
}
r... | lib/matplotlib/backends/backend_pdf.py | 132 | matplotlib | {
"docstring": "\n Create a link annotation object for embedding URLs.\n ",
"language": "en",
"n_whitespaces": 15,
"n_words": 8,
"vocab_size": 8
} | 36 | Python | 31 | 02c7ae22b4b1e7cc4fb70e18b208115f438f8f7b | backend_pdf.py | 108,704 | 12 | 80 | _get_link_annotation | https://github.com/matplotlib/matplotlib.git | Refactor URL handling | 112 | 0 | 23,311 | 13 | |
6 | 11 | def exclude_from_weight_decay(self, var_list=None, var_names=None):
if hasattr(self, "_built") and self._built:
raise ValueError(
"`exclude_from_weight_decay()` can only be configued before "
"the optimizer is built."
)
if var_list:
... | keras/optimizers/optimizer_experimental/optimizer.py | 113 | keras | {
"docstring": "Exclude variables from weight decay.\n\n This method must be called before the optimizer's `build` method is\n called. You can set specific variables to exclude out, or set a list of\n strings as the anchor words, if any of which appear in a variable's\n name, then the vari... | 43 | Python | 38 | 38b618ad90d669c85cccee521ad73cc0630cf750 | optimizer.py | 280,005 | 13 | 67 | exclude_from_weight_decay | https://github.com/keras-team/keras.git | Add general `weight_decay` support in optimizer.
We still keep adamw optimizer in case people want an explicit adamw. We can delete it in a followup cl.
PiperOrigin-RevId: 477043911 | 178 | 0 | 83,214 | 11 | |
12 | 14 | def render(pieces, style):
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep... | pandas/_version.py | 347 | pandas | {
"docstring": "Render the given version pieces into the requested style.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 8
} | 105 | Python | 53 | e2df99823758210fb2b7c4aba39e23f3445f7cd3 | _version.py | 171,634 | 36 | 186 | render | https://github.com/pandas-dev/pandas.git | BLD: use nonvendor versioneer (#49924)
* BLD: remove vendored versioneer
* run vis
* move config to pyproject.toml
* add versioneer to deps
* run pyupgrade
* fix isort and pylint
* fix ci
* fix env | 322 | 0 | 40,701 | 12 | |
3 | 10 | def get_local_agent_user_id(self, webhook_id):
found_agent_user_id = None
for agent_user_id, agent_user_data in self._store.agent_user_ids.items():
if agent_user_data[STORE_GOOGLE_LOCAL_WEBHOOK_ID] == webhook_id:
found_agent_user_id = agent_user_id
br... | homeassistant/components/google_assistant/helpers.py | 65 | core | {
"docstring": "Return the user ID to be used for actions received via the local SDK.\n\n Return None is no agent user id is found.\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 23,
"vocab_size": 19
} | 21 | Python | 18 | 25fe213f222f8f49a8126130a8e507fa15e63c83 | helpers.py | 308,771 | 7 | 40 | get_local_agent_user_id | https://github.com/home-assistant/core.git | Enable local fulfillment google assistant (#63218)
Co-authored-by: Paulus Schoutsen <paulus@home-assistant.io> | 90 | 0 | 107,509 | 10 | |
49 | 8 | def factor_nc(expr):
from sympy.simplify.simplify import powsimp
from sympy.polys import gcd, factor
| sympy/core/exprtools.py | 36 | sympy | {
"docstring": "Return the factored form of ``expr`` while handling non-commutative\n expressions.\n\n Examples\n ========\n\n >>> from sympy import factor_nc, Symbol\n >>> from sympy.abc import x\n >>> A = Symbol('A', commutative=False)\n >>> B = Symbol('B', commutative=False)\n >>> factor_nc... | 11 | Python | 9 | 498015021131af4dbb07eb110e5badaba8250c7b | exprtools.py | 196,233 | 132 | 983 | factor_nc | https://github.com/sympy/sympy.git | Updated import locations | 20 | 0 | 47,733 | 6 | |
def on_template_context(self, context, template_name, config):
    """
    The `template_context` event is called immediately after the context is
    created for the subject template and can be used to alter the context
    for that specific template only.

    Parameters:
        context: dict of template context variables
        template_name: name of the template being rendered
        config: global configuration object

    Returns:
        dict of template context variables
    """
    # Default no-op implementation: hand the context back untouched.
    return context
| mkdocs/plugins.py | 22 | mkdocs | {
"docstring": "\n The `template_context` event is called immediately after the context is created\n for the subject template and can be used to alter the context for that specific\n template only.\n\n Parameters:\n context: dict of template context variables\n templa... | 7 | Python | 7 | f79b34d174e41084391868e7b503f5c61b8b1bdf | plugins.py | 224,448 | 2 | 14 | on_template_context | https://github.com/mkdocs/mkdocs.git | Move plugin events docs into source code + refactor
* Create real (no-op) methods for each event in the base class.
* Refactor event dispatcher to not check for methods' existence, instead just call them.
* Move documentation from Markdown into docstrings of these methods.
* Activate the 'mkdocstrings' plugin.
* Use '... | 21 | 0 | 57,293 | 6 | |
7 | 16 | def _download_and_process(self, kaggle_username=None, kaggle_key=None):
if self.state == DatasetState.NOT_LOADED:
try:
self.download(kaggle_username=kaggle_username, kaggle_key=kaggle_key)
except Exception:
logger.exception("Failed to download dat... | ludwig/datasets/loaders/dataset_loader.py | 175 | ludwig | {
"docstring": "Loads the dataset, downloaded and processing it if needed.\n\n If dataset is already processed, does nothing.\n ",
"language": "en",
"n_whitespaces": 30,
"n_words": 16,
"vocab_size": 16
} | 48 | Python | 28 | e4fc06f986e03919d9aef3ab55c05fee5a6b9d3a | dataset_loader.py | 8,071 | 17 | 101 | _download_and_process | https://github.com/ludwig-ai/ludwig.git | Config-first Datasets API (ludwig.datasets refactor) (#2479)
* Adds README and stub for reading dataset configs.
* Adds __init__.py for configs, moves circular import into function scope in ludwig/datasets/__init__.py
* Print config files in datasets folder.
* First pass at automatic archive extraction.
* ... | 261 | 0 | 1,324 | 13 | |
def fromkeys(cls, iterable, value=None):
    """OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
    and values equal to v (which defaults to None).
    """
    # Insert via item assignment so key order follows the iterable.
    result = cls()
    for key in iterable:
        result[key] = value
    return result
"docstring": "OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S\n and values equal to v (which defaults to None).\n\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 19,
"vocab_size": 18
} | 16 | Python | 14 | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | compat.py | 61,896 | 5 | 30 | fromkeys | https://github.com/jindongwang/transferlearning.git | upd; format | 75 | 0 | 12,743 | 9 | |
1 | 5 | def _get_permission_name(self, action):
return "%s.%s_%s" % (self.app_label, action, self.model_name)
| wagtail/core/permission_policies/base.py | 36 | wagtail | {
"docstring": "\n Get the full app-label-qualified permission name (as required by\n user.has_perm(...) ) for the given action on this model\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 18,
"vocab_size": 17
} | 9 | Python | 9 | d10f15e55806c6944827d801cd9c2d53f5da4186 | base.py | 73,922 | 2 | 22 | _get_permission_name | https://github.com/wagtail/wagtail.git | Reformat with black | 23 | 0 | 16,177 | 8 | |
def get_children(self, request, parent):
    """
    Return a QuerySet of child objects.

    Args:
        request: The current request
        parent: The parent object
    """
    # Abstract hook: concrete subclasses must provide the implementation.
    message = f'{self.__class__.__name__} must implement get_children()'
    raise NotImplementedError(message)
| netbox/netbox/views/generic/object_views.py | 37 | netbox | {
"docstring": "\n Return a QuerySet of child objects.\n\n Args:\n request: The current request\n parent: The parent object\n ",
"language": "en",
"n_whitespaces": 59,
"n_words": 15,
"vocab_size": 14
} | 9 | Python | 9 | c5770392e32aeeaed9bd8dcf907a11c7df352b6c | object_views.py | 265,243 | 2 | 16 | get_children | https://github.com/netbox-community/netbox.git | Refactor ObjectChildrenView | 23 | 0 | 78,045 | 11 | |
def fit(self, X, y=None):
    """Do nothing and return the estimator unchanged.

    This method is just there to implement the usual API and hence
    work in pipelines.

    Parameters
    ----------
    X : array-like
    """
    # Validation only -- the validated array is intentionally discarded,
    # since fitting is a no-op for this transformer.
    check_array(X, accept_sparse='csr')
    return self
| tpot/builtins/feature_transformers.py | 40 | tpot | {
"docstring": "Do nothing and return the estimator unchanged\n This method is just there to implement the usual API and hence\n work in pipelines.\n Parameters\n ----------\n X : array-like\n ",
"language": "en",
"n_whitespaces": 69,
"n_words": 27,
"vocab_size": 25
} | 10 | Python | 10 | 388616b6247ca4ea8de4e2f340d6206aee523541 | feature_transformers.py | 181,860 | 3 | 24 | fit | https://github.com/EpistasisLab/tpot.git | Revert "Deployed 7ccda9a with MkDocs version: 1.3.0"
This reverts commit bd9629c40e01241766197119b581a99409b07068. | 31 | 0 | 43,629 | 10 | |
def get_hidden_input_context(self, name, value, attrs):
    """
    Return the context variables required to render the underlying hidden input element
    """
    # Delegates straight to the parent widget's get_context(); kept as a
    # separately-named hook so subclasses can override template context for
    # the hidden input independently of the chooser's own rendering.
    return super().get_context(name, value, attrs)
| wagtail/admin/widgets/chooser.py | 38 | wagtail | {
"docstring": "\n Return the context variables required to render the underlying hidden input element\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 12,
"vocab_size": 11
} | 9 | Python | 8 | 0f7a365bf8bf72a4894c1ca447cf52ba67f40b0c | chooser.py | 77,527 | 2 | 25 | get_hidden_input_context | https://github.com/wagtail/wagtail.git | Avoid calling super().render() in BaseChooser
This frees us up to redefine template_name and get_context in subclasses without it interfering with the rendering of the hidden input. | 23 | 0 | 16,669 | 9 | |
1 | 15 | def test_with_no_current_site(self):
self.default_site.is_default_site = False
self.default_site.save()
start_url = reverse("wagtailsettings:edit", args=["tests", "testsetting"])
response = self.client.get(
start_url, follow=True, HTTP_HOST="noneoftheabove.example.c... | wagtail/contrib/settings/tests/test_admin.py | 113 | wagtail | {
"docstring": "\n Redirection should not break if the current request does not correspond to a site\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 14,
"vocab_size": 13
} | 20 | Python | 18 | d10f15e55806c6944827d801cd9c2d53f5da4186 | test_admin.py | 73,489 | 8 | 68 | test_with_no_current_site | https://github.com/wagtail/wagtail.git | Reformat with black | 80 | 0 | 16,026 | 11 | |
def get_fiscal_year_data(from_fiscal_year, to_fiscal_year):
	"""Return the earliest year_start_date and latest year_end_date across
	the given range of fiscal years, as a dict ({} if no rows match)."""
	# The SQL string literal was missing in the block as shown (bare `,`),
	# which is a syntax error; restored as a parameterized query.
	fiscal_year = frappe.db.sql(
		"""select min(year_start_date) as year_start_date,
		max(year_end_date) as year_end_date from `tabFiscal Year` where
		name between %(from_fiscal_year)s and %(to_fiscal_year)s""",
		{"from_fiscal_year": from_fiscal_year, "to_fiscal_year": to_fiscal_year},
		as_dict=1,
	)
	return fiscal_year[0] if fiscal_year else {}
| erpnext/accounts/report/financial_statements.py | 66 | erpnext | {
"docstring": "select min(year_start_date) as year_start_date,\n\t\tmax(year_end_date) as year_end_date from `tabFiscal Year` where\n\t\tname between %(from_fiscal_year)s and %(to_fiscal_year)s",
"language": "en",
"n_whitespaces": 13,
"n_words": 16,
"vocab_size": 15
} | 19 | Python | 18 | 494bd9ef78313436f0424b918f200dab8fc7c20b | financial_statements.py | 65,238 | 9 | 42 | get_fiscal_year_data | https://github.com/frappe/erpnext.git | style: format code with black | 12 | 0 | 13,830 | 11 | |
3 | 12 | def _test_Qt_QtWebEngineQuick(pyi_builder, qt_flavor):
if is_darwin:
# QtWebEngine on Mac OS only works with a onedir build -- onefile builds do not work.
# Skip the test execution for onefile builds.
if pyi_builder._mode != 'onedir':
pytest.skip('QtWebEngine on macOS is supporte... | tests/functional/test_qt.py | 93 | @requires('PyQt5')
@requires('PyQtWebEngine') | pyinstaller | {
"docstring": "\n import sys\n\n from {0}.QtGui import QGuiApplication\n from {0}.QtQml import QQmlApplicationEngine\n\n is_qt6 = '{0}' in {{'PyQt6', 'PySide6'}}\n\n if is_qt6:\n from {0}.QtWebEngineQuick import QtWebEngineQuick\n else:\n from {0}.QtWeb... | 110 | Python | 86 | 947a96a8d2fc80bb76a4492fc9b631d642cf5065 | test_qt.py | 262,817 | 66 | 40 | _test_Qt_QtWebEngineQuick | https://github.com/pyinstaller/pyinstaller.git | tests: fixup QtWebEngine Qml/Quick test | 762 | 1 | 77,384 | 12 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.