mysql> CREATE TABLE child (
    ->   id INT(10) NOT NULL PRIMARY KEY,
    ->   parent_id INT(10),
    ->   FOREIGN KEY (parent_id) REFERENCES `parent`(`id`)
    -> ) ENGINE INNODB;
ERROR 1215 (HY000): Cannot add foreign key constraint

# We check for the parent table, and it is not there.
mysql> SHOW TABLES LIKE 'par%';
Empty set (0.00 sec)

# We go ahead and create the parent table (we'll use the same parent table
# structure for all the other examples in this blog post):
mysql> CREATE TABLE parent (
    ->   id INT(10) NOT NULL PRIMARY KEY,
    ->   column_1 INT(10) NOT NULL,
    ->   column_2 INT(10) NOT NULL,
    ->   column_3 INT(10) NOT NULL,
    ->   column_4 CHAR(10) CHARACTER SET utf8 COLLATE utf8_bin,
    ->   KEY column_2_column_3_idx (column_2, column_3),
    ->   KEY column_4_idx (column_4)
    -> ) ENGINE INNODB;
Query OK, 0 rows affected (0.00 sec)

# And now we re-attempt to create the child table
mysql> CREATE TABLE child (
    ->   id INT(10) NOT NULL PRIMARY KEY,
    ->   parent_id INT(10),
    ->   FOREIGN KEY (parent_id) REFERENCES `parent`(`id`)
    -> ) ENGINE INNODB;
Query OK, 0 rows affected (0.01 sec)
表或者列的引用中使用了错误的引号
如何诊断: 检查所有的外键,确保所有的引用正确 如何解决: 加上缺少的引号
例子:
1 2 3 4 5 6 7 8 9 10 11
-- wrong: a single pair of backticks wraps both the table and the column
ALTER TABLE child ADD FOREIGN KEY (parent_id) REFERENCES `parent(id)`;

-- correct: one pair of backticks for each identifier
ALTER TABLE child ADD FOREIGN KEY (parent_id) REFERENCES `parent`(`id`);

-- also correct: no backticks anywhere
ALTER TABLE child ADD FOREIGN KEY (parent_id) REFERENCES parent(id);

-- also correct: backticks on either object (needed when the name is a keyword)
ALTER TABLE child ADD FOREIGN KEY (parent_id) REFERENCES parent(`id`);
这里需要注意引用表需要加上对应的列名,不能因为当前表的列名与引用表的主键名一致而忽略。
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
-- Departments, keyed by their (unique) name; budgets must be positive.
CREATE TABLE department (
    dept_name VARCHAR(20),
    building  VARCHAR(15),
    budget    NUMERIC(12, 2) CHECK (budget > 0),
    PRIMARY KEY (dept_name)
);

-- Courses reference their owning department; if the department is removed,
-- the course is kept but its dept_name is set to NULL.
CREATE TABLE course (
    course_id VARCHAR(8),
    title     VARCHAR(50),
    dept_name VARCHAR(20),
    credits   NUMERIC(2, 0) CHECK (credits > 0),
    PRIMARY KEY (course_id),
    FOREIGN KEY (dept_name) REFERENCES department(dept_name)
        ON DELETE SET NULL
);
-- wrong: column_1 is not indexed in our example parent table
CREATE TABLE child (
    id INT(10) NOT NULL PRIMARY KEY,
    parent_column_1 INT(10),
    FOREIGN KEY (parent_column_1) REFERENCES `parent`(`column_1`)
) ENGINE INNODB;

-- correct: first add an index on the referenced column ...
ALTER TABLE parent ADD INDEX column_1_idx(column_1);

-- ... and then re-attempt creation of the child table
CREATE TABLE child (
    id INT(10) NOT NULL PRIMARY KEY,
    parent_column_1 INT(10),
    FOREIGN KEY (parent_column_1) REFERENCES `parent`(`column_1`)
) ENGINE INNODB;
-- wrong: column_3 only appears as the second part of a composite index on the
-- parent table, so it cannot be the leftmost key of any usable index
CREATE TABLE child (
    id INT(10) NOT NULL PRIMARY KEY,
    parent_column_3 INT(10),
    FOREIGN KEY (parent_column_3) REFERENCES `parent`(`column_3`)
) ENGINE INNODB;

-- correct: create a dedicated index for the referenced column ...
ALTER TABLE parent ADD INDEX column_3_idx (column_3);

-- ... then re-attempt creation of the child table
CREATE TABLE child (
    id INT(10) NOT NULL PRIMARY KEY,
    parent_column_3 INT(10),
    FOREIGN KEY (parent_column_3) REFERENCES `parent`(`column_3`)
) ENGINE INNODB;
两个表或者列使用了不同的字符集或者排序规则
如何诊断: 比较父表和子表(以及对应列)的字符集和排序规则 如何解决: 修改子表的字符集和排序规则,使其与父表一致
例子:
1 2 3 4 5 6 7 8 9 10 11 12 13
-- wrong: the parent table uses utf8/utf8_bin for charset/collation
CREATE TABLE child (
    id INT(10) NOT NULL PRIMARY KEY,
    parent_column_4 CHAR(10) CHARACTER SET utf8 COLLATE utf8_unicode_ci,
    FOREIGN KEY (parent_column_4) REFERENCES `parent`(`column_4`)
) ENGINE INNODB;

-- correct: COLLATE edited so it matches the parent definition
CREATE TABLE child (
    id INT(10) NOT NULL PRIMARY KEY,
    parent_column_4 CHAR(10) CHARACTER SET utf8 COLLATE utf8_bin,
    FOREIGN KEY (parent_column_4) REFERENCES `parent`(`column_4`)
) ENGINE INNODB;
-- wrong: the parent table in this example is MyISAM (InnoDB foreign keys
-- require both tables to be InnoDB)
CREATE TABLE parent (
    id INT(10) NOT NULL PRIMARY KEY
) ENGINE MyISAM;

-- correct: convert the parent table's engine to InnoDB
ALTER TABLE parent ENGINE=INNODB;
-- wrong: only the parent table name is given in the REFERENCES clause
CREATE TABLE child (
    id INT(10) NOT NULL PRIMARY KEY,
    column_2 INT(10) NOT NULL,
    FOREIGN KEY (column_2) REFERENCES parent
) ENGINE INNODB;

-- correct: both the table and the column appear in the REFERENCES definition
CREATE TABLE child (
    id INT(10) NOT NULL PRIMARY KEY,
    column_2 INT(10) NOT NULL,
    FOREIGN KEY (column_2) REFERENCES parent(column_2)
) ENGINE INNODB;
父表被分区了
如何诊断: 检查父表是否使用了分区 如何修复: 移除父表的分区
例子:
1 2 3 4 5 6 7 8 9
-- wrong: the parent table below uses PARTITIONs (partitioned InnoDB tables
-- cannot take part in foreign key relationships)
CREATE TABLE parent (
    id INT(10) NOT NULL PRIMARY KEY
) ENGINE INNODB
PARTITION BY HASH(id)
PARTITIONS 6;

-- correct: ALTER the parent table to remove the partitioning
ALTER TABLE parent REMOVE PARTITIONING;
引用的列是虚列
如何诊断: 检查引用的列是否是虚列 如何修复: 修改父表的列,确保列不是虚列
例子:
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
-- wrong: this parent table has a generated VIRTUAL column as the FK target
CREATE TABLE parent (
    id INT(10) NOT NULL PRIMARY KEY,
    column_1 INT(10) NOT NULL,
    column_2 INT(10) NOT NULL,
    column_virt INT(10) AS (column_1 + column_2) NOT NULL,
    KEY column_virt_idx (column_virt)
) ENGINE INNODB;

-- correct: make the column STORED so it can be used as a foreign key target
ALTER TABLE parent
    DROP COLUMN column_virt,
    ADD COLUMN column_virt INT(10) AS (column_1 + column_2) STORED NOT NULL;

-- and now the child table can be created pointing to column_virt
CREATE TABLE child (
    id INT(10) NOT NULL PRIMARY KEY,
    parent_virt INT(10) NOT NULL,
    FOREIGN KEY (parent_virt) REFERENCES parent(column_virt)
) ENGINE INNODB;
-- wrong: the referential action uses SET DEFAULT, which InnoDB rejects
CREATE TABLE child (
    id INT(10) NOT NULL PRIMARY KEY,
    parent_id INT(10) NOT NULL,
    FOREIGN KEY (parent_id) REFERENCES parent(id)
        ON UPDATE SET DEFAULT
) ENGINE INNODB;

-- correct: there is no direct substitute for SET DEFAULT; removing the action
-- (or choosing another one) is the corrective measure
CREATE TABLE child (
    id INT(10) NOT NULL PRIMARY KEY,
    parent_id INT(10) NOT NULL,
    FOREIGN KEY (parent_id) REFERENCES parent(id)
) ENGINE INNODB;
Eureka is a REST (Representational State Transfer) based service that is primarily used in the AWS cloud for locating services for the purpose of load balancing and failover of middle-tier servers. We call this service, the Eureka Server. Eureka also comes with a Java-based client component, the Eureka Client, which makes interactions with the service much easier. The client also has a built-in load balancer that does basic round-robin load balancing. At Netflix, a much more sophisticated load balancer wraps Eureka to provide weighted load balancing based on several factors like traffic, resource usage, error conditions, etc. to provide superior resiliency.
/** * Get the list of all eureka service urls from properties file for the eureka client to talk to. * * @param clientConfig the clientConfig to use * @param instanceZone The zone in which the client resides * @param preferSameZone true if we have to prefer the same zone as the client, false otherwise * @return an (ordered) map of zone -> list of urls mappings, with the preferred zone first in iteration order */ publicstatic Map<String, List<String>> getServiceUrlsMapFromConfig(EurekaClientConfig clientConfig, String instanceZone, boolean preferSameZone) { Map<String, List<String>> orderedUrls = newLinkedHashMap<>(); Stringregion= getRegion(clientConfig); String[] availZones = clientConfig.getAvailabilityZones(clientConfig.getRegion()); if (availZones == null || availZones.length == 0) { availZones = newString[1]; availZones[0] = DEFAULT_ZONE; } logger.debug("The availability zone for the given region {} are {}", region, availZones); intmyZoneOffset= getZoneOffset(instanceZone, preferSameZone, availZones);
@Override publicvoidnotify(StatusChangeEvent statusChangeEvent) { if (InstanceStatus.DOWN == statusChangeEvent.getStatus() || InstanceStatus.DOWN == statusChangeEvent.getPreviousStatus()) { // log at warn level if DOWN was involved logger.warn("Saw local status change event {}", statusChangeEvent); } else { logger.info("Saw local status change event {}", statusChangeEvent); } instanceInfoReplicator.onDemandUpdate(); } };
if (clientConfig.shouldOnDemandUpdateStatusChange()) { applicationInfoManager.registerStatusChangeListener(statusChangeListener); }
instanceInfoReplicator.start(clientConfig.getInitialInstanceInfoReplicationIntervalSeconds()); } else { logger.info("Not registering with Eureka server per configuration"); } }
LongdirtyTimestamp= instanceInfo.isDirtyWithTime(); if (dirtyTimestamp != null) { discoveryClient.register(); instanceInfo.unsetIsDirty(dirtyTimestamp); } } catch (Throwable t) { logger.warn("There was a problem with the instance info replicator", t); } finally { Futurenext= scheduler.schedule(this, replicationIntervalSeconds, TimeUnit.SECONDS); scheduledPeriodicRef.set(next); } }
刷新实例信息。更新服务注册,如果注册时间过期,重新注册
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17
/** * Register with the eureka service by making the appropriate REST call. */ booleanregister()throws Throwable { logger.info(PREFIX + "{}: registering service...", appPathIdentifier); EurekaHttpResponse<Void> httpResponse; try { httpResponse = eurekaTransport.registrationClient.register(instanceInfo); } catch (Exception e) { logger.warn(PREFIX + "{} - registration failed {}", appPathIdentifier, e.getMessage(), e); throw e; } if (logger.isInfoEnabled()) { logger.info(PREFIX + "{} - registration status: {}", appPathIdentifier, httpResponse.getStatusCode()); } return httpResponse.getStatusCode() == 204; }
booleanremoteRegionsModified=false; // This makes sure that a dynamic change to remote regions to fetch is honored. StringlatestRemoteRegions= clientConfig.fetchRegistryForRemoteRegions(); if (null != latestRemoteRegions) { StringcurrentRemoteRegions= remoteRegionsToFetch.get(); if (!latestRemoteRegions.equals(currentRemoteRegions)) { // Both remoteRegionsToFetch and AzToRegionMapper.regionsToFetch need to be in sync synchronized (instanceRegionChecker.getAzToRegionMapper()) { if (remoteRegionsToFetch.compareAndSet(currentRemoteRegions, latestRemoteRegions)) { String[] remoteRegions = latestRemoteRegions.split(","); remoteRegionsRef.set(remoteRegions); instanceRegionChecker.getAzToRegionMapper().setRegionsToFetch(remoteRegions); remoteRegionsModified = true; } else { logger.info("Remote regions to fetch modified concurrently," + " ignoring change from {} to {}", currentRemoteRegions, latestRemoteRegions); } } } else { // Just refresh mapping to reflect any DNS/Property change instanceRegionChecker.getAzToRegionMapper().refreshMapping(); } }
// handle cases where clients may be registering with bad DataCenterInfo with missing data DataCenterInfodataCenterInfo= info.getDataCenterInfo(); if (dataCenterInfo instanceof UniqueIdentifier) { StringdataCenterInfoId= ((UniqueIdentifier) dataCenterInfo).getId(); if (isBlank(dataCenterInfoId)) { booleanexperimental="true".equalsIgnoreCase(serverConfig.getExperimental("registration.validation.dataCenterInfoId")); if (experimental) { Stringentity="DataCenterInfo of type " + dataCenterInfo.getClass() + " must contain a valid id"; return Response.status(400).entity(entity).build(); } elseif (dataCenterInfo instanceof AmazonInfo) { AmazonInfoamazonInfo= (AmazonInfo) dataCenterInfo; StringeffectiveId= amazonInfo.get(AmazonInfo.MetaDataKey.instanceId); if (effectiveId == null) { amazonInfo.getMetadata().put(AmazonInfo.MetaDataKey.instanceId.getName(), info.getId()); } } else { logger.warn("Registering DataCenterInfo of type {} without an appropriate id", dataCenterInfo.getClass()); } } }
registry.register(info, "true".equals(isReplication)); return Response.status(204).build(); // 204 to be backwards compatible }
@Override public Object invoke(Object proxy, Method method, Object[] args)throws Throwable { Log.d(TAG, "hey, baby; you are hooked!!"); Log.d(TAG, "method:" + method.getName() + " called with args:" + Arrays.toString(args));
return method.invoke(mBase, args); } }
当IActivityManager触发调用时,invoke回调,这个时候会打印出添加的内容
1 2
10-0416:04:13.09217046-17046/cn.binea.pluginframeworkdemo D/HookHandler: hey, baby; you are hooked!! 10-0416:04:13.09317046-17046/cn.binea.pluginframeworkdemo D/HookHandler: method:activityIdle called with args:[android.os.BinderProxy@e0086ab, {1.0 310mcc260mnc [en_US] ldltr sw360dp w360dp h568dp 480dpi nrml port finger qwerty/v/v -nav/h s.6}, false]
PMS
1 2 3 4 5 6 7 8 9 10 11 12 13 14
@Override public PackageManager getPackageManager() { if (mPackageManager != null) { return mPackageManager; }
IPackageManagerpm= ActivityThread.getPackageManager(); if (pm != null) { // Doesn't matter if we make more than one instance. return (mPackageManager = newApplicationPackageManager(this, pm)); }
@Throws(Exception::class) funhookClipboardService() { val CLIPBOARD_SERVICE = "clipboard" val serviceManager = Class.forName("android.os.ServiceManager") val getService = serviceManager.getDeclaredMethod("getService", String::class.java) val rawBinder = getService.invoke(null, CLIPBOARD_SERVICE) as IBinder val hookedBinder = Proxy.newProxyInstance(serviceManager.classLoader, arrayOf(IBinder::class.java), BinderProxyHookHandlerKt(rawBinder)) val cacheField = serviceManager.getDeclaredField("sCache") cacheField.isAccessible = true val cache = cacheField.get(null) as HashMap<String, IBinder> cache.put(CLIPBOARD_SERVICE, hookedBinder as IBinder) } } }
//代理方法 classInvocationHandlerImpl(val base: Any) : InvocationHandler { override fun invoke(p0: Any?, p1: Method?, p2: Array<out Any>?): Any {
if ("doSomething" == (p1!!.name)) { val value: Long = p2!![0] as Long valdoSthValue= value * 5 println(doSthValue) val invoke: Array<Any> = p1.invoke(base, doSthValue) as Array<Any> invoke[0] = "d" return invoke }
voidperformReceiveLocked(ProcessRecord app, IIntentReceiver receiver, Intent intent, int resultCode, String data, Bundle extras, boolean ordered, boolean sticky, int sendingUser)throws RemoteException { // Send the intent to the receiver asynchronously using one-way binder calls. if (app != null) { if (app.thread != null) { // If we have an app thread, do the call through that so it is // correctly ordered with other one-way calls. try { app.thread.scheduleRegisteredReceiver(receiver, intent, resultCode, data, extras, ordered, sticky, sendingUser, app.repProcState); // TODO: Uncomment this when (b/28322359) is fixed and we aren't getting // DeadObjectException when the process isn't actually dead. //} catch (DeadObjectException ex) { // Failed to call into the process. It's dying so just let it die and move on. // throw ex; } catch (RemoteException ex) { // Failed to call into the process. It's either dying or wedged. Kill it gently. synchronized (mService) { Slog.w(TAG, "Can't deliver broadcast to " + app.processName + " (pid " + app.pid + "). Crashing it."); app.scheduleCrash("can't deliver broadcast"); } throw ex; } } else { // Application has died. Receiver doesn't exist. thrownewRemoteException("app.thread must not be null"); } } else { receiver.performReceive(intent, resultCode, data, extras, ordered, sticky, sendingUser); } }
finalintbroadcastIntentLocked(ProcessRecord callerApp, String callerPackage, Intent intent, String resolvedType, IIntentReceiver resultTo, int resultCode, String resultData, Bundle resultExtras, String[] requiredPermissions, int appOp, Bundle bOptions, boolean ordered, boolean sticky, int callingPid, int callingUid, int userId) { ...
if (!ordered && NR > 0) { // If we are not serializing this broadcast, then send the // registered receivers separately so they don't wait for the // components to be launched. if (isCallerSystem) { checkBroadcastFromSystem(intent, callerApp, callerPackage, callingUid, isProtectedBroadcast, registeredReceivers); } finalBroadcastQueuequeue= broadcastQueueForIntent(intent); BroadcastRecordr=newBroadcastRecord(queue, intent, callerApp, callerPackage, callingPid, callingUid, callerInstantApp, resolvedType, requiredPermissions, appOp, brOptions, registeredReceivers, resultTo, resultCode, resultData, resultExtras, ordered, sticky, false, userId); if (DEBUG_BROADCAST) Slog.v(TAG_BROADCAST, "Enqueueing parallel broadcast " + r); finalbooleanreplaced= replacePending && (queue.replaceParallelBroadcastLocked(r) != null); // Note: We assume resultTo is null for non-ordered broadcasts. if (!replaced) { queue.enqueueParallelBroadcastLocked(r); queue.scheduleBroadcastsLocked(); } registeredReceivers = null; NR = 0; } }
privatevoidhandleReceiver(ReceiverData data) { // If we are getting ready to gc after going to the background, well // we are back active so skip it. unscheduleGcIdler();
// String[] paths = classPath.split(":"); // URL[] urls = new URL[paths.length]; // for (int i = 0; i < paths.length; i++) { // try { // urls[i] = new URL("file://" + paths[i]); // } // catch (Exception ex) { // ex.printStackTrace(); // } // } // // return new java.net.URLClassLoader(urls, null);
// TODO Make this a java.net.URLClassLoader once we have those? returnnewPathClassLoader(classPath, librarySearchPath, BootClassLoader.getInstance()); }
privatestatic Element[] makeDexElements(List<File> files, File optimizedDirectory, List<IOException> suppressedExceptions, ClassLoader loader) { Element[] elements = newElement[files.size()]; intelementsPos=0; /* * Open all files and load the (direct or contained) dex files up front. */ for (File file : files) { if (file.isDirectory()) { // We support directories for looking up resources. Looking up resources in // directories is useful for running libcore tests. elements[elementsPos++] = newElement(file); } elseif (file.isFile()) { Stringname= file.getName();
if (name.endsWith(DEX_SUFFIX)) { // Raw dex file (not inside a zip/jar). try { DexFiledex= loadDexFile(file, optimizedDirectory, loader, elements); if (dex != null) { elements[elementsPos++] = newElement(dex, null); } } catch (IOException suppressed) { System.logE("Unable to load dex file: " + file, suppressed); suppressedExceptions.add(suppressed); } } else { DexFiledex=null; try { dex = loadDexFile(file, optimizedDirectory, loader, elements); } catch (IOException suppressed) { /* * IOException might get thrown "legitimately" by the DexFile constructor if * the zip file turns out to be resource-only (that is, no classes.dex file * in it). * Let dex == null and hang on to the exception to add to the tea-leaves for * when findClass returns null. */ suppressedExceptions.add(suppressed); }
virtualintopenContentUri(const String16& stringUri) { Parcel data, reply; data.writeInterfaceToken(IActivityManager::getInterfaceDescriptor()); data.writeString16(stringUri); status_t ret = remote()->transact(OPEN_CONTENT_URI_TRANSACTION, data, & reply); int fd = -1; if (ret == NO_ERROR) { int32_t exceptionCode = reply.readExceptionCode(); if (!exceptionCode) { // Success is indicated here by a nonzero int followed by the fd; // failure by a zero int with no data following. if (reply.readInt32() != 0) { fd = fcntl(reply.readParcelFileDescriptor(), F_DUPFD_CLOEXEC, 0); } } else { // An exception was thrown back; fall through to return failure ALOGD("openContentUri(%s) caught exception %d\n", String8(stringUri).string(), exceptionCode); } } return fd; } };
/** * How many redirects and auth challenges should we attempt? Chrome follows 21 redirects; Firefox, * curl, and wget follow 20; Safari follows 16; and HTTP/1.0 recommends 5. */ privatestaticfinalintMAX_FOLLOW_UPS=20;
// We need the network to satisfy this request. Possibly for validating a conditional GET. booleandoExtensiveHealthChecks= !request.method().equals("GET"); HttpCodechttpCodec= streamAllocation.newStream(client, chain, doExtensiveHealthChecks); RealConnectionconnection= streamAllocation.connection(); ... }
private RealConnection findConnection(int connectTimeout, int readTimeout, int writeTimeout, boolean connectionRetryEnabled)throws IOException { ... if (this.connection != null) { // We had an already-allocated connection and it's good. result = this.connection; releasedConnection = null; }
if (result == null) { // Attempt to get a connection from the pool. Internal.instance.get(connectionPool, address, this, null); if (connection != null) { foundPooledConnection = true; result = connection; } else { selectedRoute = route; } } ... for (inti=0, size = routes.size(); i < size; i++) { Routeroute= routes.get(i); Internal.instance.get(connectionPool, address, this, route); if (connection != null) { foundPooledConnection = true; result = connection; this.route = route; break; } } ... result.connect( connectTimeout, readTimeout, writeTimeout, connectionRetryEnabled, call, eventListener); }
longcleanup(long now) { ... if (pruneAndGetAllocationCount(connection, now) > 0) { inUseConnectionCount++; continue; } ... if (idleDurationNs > longestIdleDurationNs) { longestIdleDurationNs = idleDurationNs; longestIdleConnection = connection; } ... if (longestIdleDurationNs >= this.keepAliveDurationNs || idleConnectionCount > this.maxIdleConnections) { // We've found a connection to evict. Remove it from the list, then close it below (outside // of the synchronized block). connections.remove(longestIdleConnection); } }
if (HttpMethod.invalidatesCache(response.request().method())) { try { remove(response.request()); } catch (IOException ignored) { // The cache cannot be written. } returnnull; } if (!requestMethod.equals("GET")) { // Don't cache non-GET responses. We're technically allowed to cache // HEAD requests and some POST requests, but the complexity of doing // so is high and the benefit is low. returnnull; }
if (HttpHeaders.hasVaryAll(response)) { returnnull; }