<?xml version="1.0" encoding="utf-8" ?>
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:syn="http://purl.org/rss/1.0/modules/syndication/" xmlns="http://purl.org/rss/1.0/">




    



<channel rdf:about="https://cis-india.org/search_rss">
  <title>Centre for Internet and Society</title>
  <link>https://cis-india.org</link>
  
  <description>These are the search results for the query, showing results 141 to 155.</description>
  
  
  
  
  <image rdf:resource="https://cis-india.org/logo.png"/>

  <items>
    <rdf:Seq>
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/community-standards-roundtable-conversations"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/networked-economies-and-gender-action-learning"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/bloomberg-quint-nishant-sharma-september-27-2018-after-sc-setback-fintech-firms-await-clarity-on-aadhaar"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/blog/cross-border-data-sharing-and-india-a-study-in-processes-content-and-capacity"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/takshashilas-online-cogitatum-on-ai-and-ethics-in-india"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/sflc-round-table-discussion-on-personal-data-protection-bill"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/forecasting-the-implications-of-the-cloud-act-around-the-world"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/conference-on-data-protection"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/meeting-of-information-systems-security-and-biometrics-sectional-committee"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/gender-and-privacy-countering-the-patriarchal-gaze"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/symposium-on-data-privacy-and-citizens-rights"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/blog/artificial-intelligence-in-the-governance-sector-in-india"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/blog/ai-in-india-a-policy-agenda"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/blog/the-srikrishna-committee-data-protection-bill-and-artificial-intelligence-in-india"/>
        
        
            <rdf:li rdf:resource="https://cis-india.org/internet-governance/news/celebrating-one-year-of-the-justice-k-s-puttaswamy-v-union-of-india-judgment"/>
        
    </rdf:Seq>
  </items>

</channel>


    <item rdf:about="https://cis-india.org/internet-governance/news/community-standards-roundtable-conversations">
    <title>Community Standards Roundtable Conversations</title>
    <link>https://cis-india.org/internet-governance/news/community-standards-roundtable-conversations</link>
    <description>
        &lt;b&gt;Ambika Tandon was a participant in a roundtable organized by Facebook, School of Media &amp; Cultural Studies, and Tata Institute of Social Sciences in Bengaluru on October 7, 2018.&lt;/b&gt;
        &lt;p&gt;The agenda for the roundtable was to discuss Facebook's community standards, particularly those on hate speech and harassment, and to receive feedback through a feminist and gendered lens. &lt;a class="external-link" href="http://cis-india.org/internet-governance/files/community-standards-roundtable-conversations"&gt;Click&lt;/a&gt; to read more.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/community-standards-roundtable-conversations'&gt;https://cis-india.org/internet-governance/news/community-standards-roundtable-conversations&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Freedom of Speech and Expression</dc:subject>
    
    
        <dc:subject>Hate Speech</dc:subject>
    
    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-10-16T14:01:55Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/networked-economies-and-gender-action-learning">
    <title>Networked Economies and Gender Action Learning</title>
    <link>https://cis-india.org/internet-governance/news/networked-economies-and-gender-action-learning</link>
    <description>
        &lt;b&gt;Elonnai Hickok, Sunil Abraham and Ambika Tandon participated in a meeting organized by IDRC for grantees under their networked economies programme to discuss gender-based outputs and development outcomes in their work. The event was held in Ottawa on September 20 - 21, 2018, facilitated by Gender at Work.&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;Sunil Abraham, Swaraj Paul Barooah and Ambika Tandon also attended a workshop on Gender Action Learning on September 24 - 25, 2018, which discussed strategies to work on gender under a grant for Cyber Policy Centres. Other organizations present at the workshop were Research ICT Africa, Lirne Asia, and Centre Latam Digital at CIDE,  Mexico. Gender at Work facilitated this workshop as well, and will be  working with all the grantees over a period of 18 months.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/networked-economies-and-gender-action-learning'&gt;https://cis-india.org/internet-governance/news/networked-economies-and-gender-action-learning&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Gender</dc:subject>
    
    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-10-02T03:10:02Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/bloomberg-quint-nishant-sharma-september-27-2018-after-sc-setback-fintech-firms-await-clarity-on-aadhaar">
    <title>After Supreme Court Setback, Fintech Firms Await Clarity On Aadhaar</title>
    <link>https://cis-india.org/internet-governance/news/bloomberg-quint-nishant-sharma-september-27-2018-after-sc-setback-fintech-firms-await-clarity-on-aadhaar</link>
    <description>
        &lt;b&gt;The 12-digit Aadhaar number is now out of bounds for fintech companies in India.&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;The article by Nishant Sharma was &lt;a class="external-link" href="https://www.bloombergquint.com/aadhaar/after-supreme-court-setback-fintech-firms-await-clarity-on-aadhaar"&gt;published in Bloomberg Quint&lt;/a&gt; on September 27, 2018. Pranesh Prakash was quoted.&lt;/p&gt;
&lt;hr /&gt;
&lt;h3&gt;Video&lt;/h3&gt;
&lt;p&gt;&lt;iframe frameborder="0" height="315" src="https://www.youtube.com/embed/FiEbZcL3lnY" width="560"&gt;&lt;/iframe&gt;&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;With the Supreme Court on Wednesday terming Aadhaar authentication by private companies as “&lt;a href="https://www.bloombergquint.com/law-and-policy/2018/09/26/aadhaar-a-quick-summary-of-the-supreme-court-majority-order" target="_blank"&gt;unconstitutional&lt;/a&gt;”,  companies such as online wallets and e-tailers, among others, will now  have to make changes to how they onboard and verify customers, in  addition to how they transact.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;In a 567-page majority judgment authored by Justice Sikri and concurred upon by two other judges—Chief Justice Dipak Misra and Justice AM Khanwilkar—the court held that Section 57 of the Aadhaar Act, which allows private companies to use Aadhaar for authentication services based on a contract between the corporate and an individual, would enable commercial exploitation of private data and hence is unconstitutional.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;“What it essentially means is that the private bodies, such as lending platforms, wallets, or any private entity, cannot use Aadhaar for authentication,” said Anirudh Rastogi, founder at Ikigai Law (formerly TRA), a law firm that specialises in representing businesses on data privacy.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The decision is set to  impact private companies right from Flipkart-owned PhonePe, Paytm,  Reliance Jio and Amazon, among others, which rely on Aadhaar for  e-verification. Amazon recently launched cardless equated monthly  installments on Amazon Pay through the digital finance platform Capital  Float and asked customers to provide Aadhaar numbers or virtual ID and  PAN details on the Amazon app for verification.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;'Aadhaar Is Just Another ID'&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;Pranesh Prakash, fellow, Centre for Internet and Society, said that with this judgment Aadhaar is no longer the identity infrastructure its creators had dreamt of. “It is now just another ID.”&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;For those opposed to  Aadhaar, on privacy and security grounds, this may be a part victory.  But for the Fintech industry it stymies the use of quick Aadhaar-based  e-KYC (know your customer norms) to onboard customers. “The fintech  industry thrives on the instant paperless mantra, and this move will  curb its rapid growth, ” Amrish Rau, co-founder of PayU, said in a text  message.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The verdict is also set to push up costs for the  industry. Rau said: “Conducting physical KYC would be a costly affair,  with every physical KYC costing about Rs 100 per person.”&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Companies  like PhonePe await more clarity. “We are waiting to hear from bodies  like the Reserve Bank of India, UIDAI on what KYC that will be required  for wallets moving ahead," Sameer Nigam, cofounder of PhonePe, said.  "Whether we go to no KYC, lower limit environment or go to the physical  KYC environment."&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The  judgment also stated that the identification number will not be  mandatory for opening bank accounts, mobile-phone connections or for  admissions into educational institutions. However, Aadhaar will continue  to be mandatory for the distribution of state-sponsored welfare schemes  including direct benefit transfers and the public distribution system.  Taxpayers will have to link their Permanent Account Numbers to the  biometric database.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;Aadhaar-Based KYC: Allowed With Consent?&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;The  Supreme Court has concluded that the part of section 57 which enables  body corporate and individuals also to seek authentication, that too on  the basis of a contract between the individual and such body corporate  or person, would impinge upon the right to privacy of such individuals.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Prasanna S, a Supreme Court advocate and lawyer for one of the petitioners in the Aadhaar matter, interpreted it to mean that even if a customer voluntarily wants to use Aadhaar for e-KYC, businesses cannot accept it.&lt;/p&gt;
&lt;blockquote style="text-align: justify; "&gt;They have struck down the part of Section 57 that allows use of Aadhaar based on a contract. A contract, by nature, is voluntary. But since the court has struck down this part, even voluntary use won’t be permitted.&lt;/blockquote&gt;
&lt;p style="text-align: justify; "&gt;Prasanna S, Advocate, Supreme Court&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;Jaitley Hints At Legal Backing&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;Meanwhile,  Finance Minister Arun Jaitley on Wednesday hinted that the Centre is  likely to examine whether separate legal backing is needed for Section  57 of the Aadhaar Act, the newswire PTI reported. “So, let us first read  the judgement. There are two-three prohibited areas. Are they because  they are totally prohibited or are they because they need legal  backing,” Jaitley was quoted as saying.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Rastogi of Ikigai Law said  that the court has left open for the government to promulgate a law to  enable private parties to use Aadhaar that can withstand judicial  scrutiny.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Rahul  Matthan, a technology partner at law firm Trilegal differed with this  view. He said that since the apex court has ruled that private entities  cannot access the Aadhaar infrastructure, it means that even if the  government brings a specific law to allow for that, it would be  unconstitutional.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Prasanna agreed with this interpretation.&lt;/p&gt;
&lt;blockquote style="text-align: justify; "&gt;The court has hinted that commercial exploitation of personal information will fail the proportionality test laid down by it in the Right to Privacy judgment. This is one of the grounds for them to conclude that Section 57 is unconstitutional. So even if a law is introduced, private access will be impermissible.&lt;/blockquote&gt;
&lt;p style="text-align: justify; "&gt;Prasanna S, Advocate, Supreme Court&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;Are Aadhaar-Based KYCs Tainted?&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;Since  the use of Aadhaar by private entities has been struck down, does it  mean entities who have used it for KYC so far have to re-do that  exercise? And data that was collected as part of Aadhaar-based KYC- does  that need to be deleted?&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The majority order hasn’t specifically addressed these questions, Matthan pointed out. But he went on to explain that his reading of the judgment is that the court wants things to remain as they are.&lt;/p&gt;
&lt;blockquote style="text-align: justify; "&gt;The  Supreme Court has said that collection of data before the Aadhaar Act  was introduced is valid. If you follow that sentiment, may be we can  argue that there’s no requirement to delete the data.&lt;/blockquote&gt;
&lt;p style="text-align: justify; "&gt;Rahul Matthan, Partner, Trilegal&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;br /&gt;Whatever  has been done without the authority of law has to go, Prasanna said.  But this outcome may not be practical and another hearing before the  Supreme Court may be required to clear these questions, he added.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Private entities such as the online cab aggregator Ola had already removed eKYC from their e-wallets when BloombergQuint last checked. Others may follow suit.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/bloomberg-quint-nishant-sharma-september-27-2018-after-sc-setback-fintech-firms-await-clarity-on-aadhaar'&gt;https://cis-india.org/internet-governance/news/bloomberg-quint-nishant-sharma-september-27-2018-after-sc-setback-fintech-firms-await-clarity-on-aadhaar&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Aadhaar</dc:subject>
    
    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-10-01T23:39:42Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/blog/cross-border-data-sharing-and-india-a-study-in-processes-content-and-capacity">
    <title>Cross-Border Data Sharing and India: A study in Processes, Content and Capacity</title>
    <link>https://cis-india.org/internet-governance/blog/cross-border-data-sharing-and-india-a-study-in-processes-content-and-capacity</link>
    <description>
        &lt;b&gt;A majority of criminal investigations in the modern era necessitate law enforcement access to electronic evidence stored extra-territorially. The conventional methods available to investigative agencies for compelling the presentation of evidence often fail when the evidence is not present within the territorial boundaries of the state.&lt;/b&gt;
        &lt;p&gt;&lt;span&gt;The crux of the issue lies in the age-old international law tenet of territorial sovereignty. Investigating crimes is a sovereign act and it cannot be exercised in the territory of another country without that country’s consent or through a permissive principle of extra-territorial jurisdiction. Certain countries have explicit statutory provisions which disallow companies incorporated in their territory from disclosing data to foreign jurisdictions. The United States of America, which houses most of the leading technological firms like Google, Apple, Microsoft, Facebook, and WhatsApp, has this requirement.&lt;/span&gt;&lt;/p&gt;
&lt;p&gt;This necessitates a consent based international model for cross border data sharing as a completely ad-hoc system of requests for each investigation would be ineffective. Towards this, Mutual Legal Assistance Treaties (MLATs) are the most widely used method for cross border data sharing, with letters rogatory, emergency requests and informal requests being other methods available to most investigators. While recent gambits towards ring-fencing the data within Indian shores might alter the contours of the debate, a sustainable long-term strategy requires a coherent negotiation strategy that enables co-operation with a range of international partners.&lt;/p&gt;
&lt;p&gt;This negotiation strategy needs to be underscored by domestic safeguards that ensure human rights guarantees in compliance with international standards, robust identification and augmentation of  capacity and clear articulation of how India’s strategy lines up with the existing tenets of International law. This report studies the workings of the Mutual Legal Assistance Treaty (MLAT) between the USA and India and identifies hurdles in its existing form, culls out suggestions for improvement and explores how recent legislative developments, such as the CLOUD Act might alter the landscape.&lt;/p&gt;
&lt;p&gt;The path forward lies in undertaking process-based reforms within India, with an eye on leveraging these developments to articulate a strategically beneficial position when negotiating with external partners. As the nature of policing changes to a model that increasingly relies on electronic evidence, India needs to ensure that its technical strides in accessing this evidence are not held back by the lack of an enabling policy environment. While the data localisation provisions introduced in the draft Personal Data Protection Bill may alter the landscape once it becomes law, this paper retains its relevance in terms of guiding the processes, content and capacity needed to adequately manoeuvre the present conflict-of-laws situation and to access data not belonging to Indians that may be needed for criminal investigations. As a disclaimer, the report and graphics contained within it have been drafted using publicly available information and may not reflect real-world practices.&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;&lt;strong&gt;&lt;a class="external-link" href="http://cis-india.org/internet-governance/files/mlat-report"&gt;Click here to download the report&lt;/a&gt;&lt;/strong&gt;. With research assistance from Sarath Mathew and Navya Alam, and visualisation by Saumyaa Naidu.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/blog/cross-border-data-sharing-and-india-a-study-in-processes-content-and-capacity'&gt;https://cis-india.org/internet-governance/blog/cross-border-data-sharing-and-india-a-study-in-processes-content-and-capacity&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Amber Sinha, Elonnai Hickok, Udbhav Tiwari and Arindrajit Basu</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-09-29T00:37:39Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/takshashilas-online-cogitatum-on-ai-and-ethics-in-india">
    <title>Takshashila's online Cogitatum on AI and Ethics in India</title>
    <link>https://cis-india.org/internet-governance/news/takshashilas-online-cogitatum-on-ai-and-ethics-in-india</link>
    <description>
        &lt;b&gt;Elonnai Hickok participated in an event organized by Takshashila on August 27, 2018, and made a presentation on Ethics and AI in India. The event was held at the Takshashila Institution.&lt;/b&gt;
        &lt;p&gt;&lt;a class="external-link" href="http://cis-india.org/internet-governance/files/ethics-and-ai"&gt;Click to view the slides&lt;/a&gt;&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/takshashilas-online-cogitatum-on-ai-and-ethics-in-india'&gt;https://cis-india.org/internet-governance/news/takshashilas-online-cogitatum-on-ai-and-ethics-in-india&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-09-26T01:46:39Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/sflc-round-table-discussion-on-personal-data-protection-bill">
    <title>SFLC Round Table Discussion on Personal Data Protection Bill </title>
    <link>https://cis-india.org/internet-governance/news/sflc-round-table-discussion-on-personal-data-protection-bill</link>
    <description>
        &lt;b&gt;Shweta Mohandas participated in a Round Table Discussion on the Personal Data Protection Bill, organised by SFLC on September 25, 2018 in Bangalore. She also moderated the first session - Data Protection Principles (Rights and Obligations).&lt;/b&gt;
        &lt;p&gt;See the agenda of the &lt;a class="external-link" href="http://cis-india.org/internet-governance/files/agenda-for-round-table-for-data-protection"&gt;event here&lt;/a&gt;.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/sflc-round-table-discussion-on-personal-data-protection-bill'&gt;https://cis-india.org/internet-governance/news/sflc-round-table-discussion-on-personal-data-protection-bill&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Data Protection</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-10-02T03:16:19Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/forecasting-the-implications-of-the-cloud-act-around-the-world">
    <title>Forecasting the Implications of the CLOUD Act Around the World</title>
    <link>https://cis-india.org/internet-governance/news/forecasting-the-implications-of-the-cloud-act-around-the-world</link>
    <description>
        &lt;b&gt;Elonnai Hickok participated in the event organized by the Global Network Initiative at the Russell Senate Office Building, Washington D.C. on September 18, 2018 as a speaker.&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;Elonnai spoke on the CLOUD Act from an Indian perspective based on the article that she &lt;a class="external-link" href="https://cis-india.org/internet-governance/blog/an-analysis-of-the-cloud-act-and-implications-for-india"&gt;co-authored&lt;/a&gt; with Vipul Kharbanda.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/forecasting-the-implications-of-the-cloud-act-around-the-world'&gt;https://cis-india.org/internet-governance/news/forecasting-the-implications-of-the-cloud-act-around-the-world&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-09-20T15:51:48Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/conference-on-data-protection">
    <title>Conference on Data Protection</title>
    <link>https://cis-india.org/internet-governance/news/conference-on-data-protection</link>
    <description>
        &lt;b&gt;Sunil Abraham and Amber Sinha participated in a conference on data protection at NIPFP in New Delhi on September 4, 2018. The event was organized by the National Institute of Public Finance and Policy.&lt;/b&gt;
        &lt;p&gt;Sunil Abraham and Amber Sinha were discussants in the session “Disclosures in Privacy Policies: Does Consent Work?”&lt;/p&gt;
&lt;p&gt;&lt;a class="external-link" href="http://cis-india.org/internet-governance/files/data-protection"&gt;Click to see the agenda&lt;/a&gt;&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/conference-on-data-protection'&gt;https://cis-india.org/internet-governance/news/conference-on-data-protection&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-09-20T14:47:17Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/meeting-of-information-systems-security-and-biometrics-sectional-committee">
    <title>Meeting of Information Systems Security and Biometrics Sectional Committee</title>
    <link>https://cis-india.org/internet-governance/news/meeting-of-information-systems-security-and-biometrics-sectional-committee</link>
    <description>
        &lt;b&gt;Gurshabad Grover attended the 14th meeting of the Information Systems Security and Biometrics Sectional Committee (LITD 17) of the Bureau of Indian Standards (BIS), which was held at the BIS office in New Delhi on 14 September 2018.&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;This was Gurshabad's first LITD 17 meeting. The committee noted his co-option into the committee and his registration in Working Group 1 (Information security management systems) and WG 5 (Identity management and privacy technologies) of ISO/IEC JTC 1 / SC 27, “IT Security Techniques”. Some of the items discussed included proposed standards for biometric information protection, mobile phone security, and data privacy engineering &amp;amp; management practices.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/meeting-of-information-systems-security-and-biometrics-sectional-committee'&gt;https://cis-india.org/internet-governance/news/meeting-of-information-systems-security-and-biometrics-sectional-committee&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-09-19T14:08:23Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/gender-and-privacy-countering-the-patriarchal-gaze">
    <title>Gender and Privacy: Countering the Patriarchal Gaze</title>
    <link>https://cis-india.org/internet-governance/news/gender-and-privacy-countering-the-patriarchal-gaze</link>
    <description>
        &lt;b&gt;Ambika Tandon participated in a workshop on privacy and gender organized by Privacy International in the United Kingdom on September 13 and 14, 2018. Ambika was part of a panel on reproductive rights and privacy in India. She also recorded a podcast on the same topic, as part of a series on privacy and gender being hosted by Privacy International.&lt;/b&gt;
        &lt;p&gt;Read the Agenda &lt;a class="external-link" href="https://cis-india.org/internet-governance/files/gender-and-privacy-workshop"&gt;here&lt;/a&gt;&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/gender-and-privacy-countering-the-patriarchal-gaze'&gt;https://cis-india.org/internet-governance/news/gender-and-privacy-countering-the-patriarchal-gaze&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-09-19T01:48:07Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/symposium-on-data-privacy-and-citizens-rights">
    <title>Symposium on Data Privacy and Citizen's Rights</title>
    <link>https://cis-india.org/internet-governance/news/symposium-on-data-privacy-and-citizens-rights</link>
    <description>
        &lt;b&gt;Shweta Mohandas was a panelist at the Symposium on Data Privacy and Citizen's Rights on September 9, 2018. The Symposium was organised by the Tech Law Forum of NALSAR University of Law, Hyderabad. &lt;/b&gt;
        &lt;h3 style="text-align: justify; "&gt;Concept Note&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;The National Academy of Legal Studies and Research (NALSAR) University of Law, Hyderabad is organising a Symposium on DATA PRIVACY AND CITIZEN’S RIGHTS to provide multiple stakeholders one platform to discuss and deliberate on the BN Srikrishna Committee Report and Draft Bill.  &lt;br /&gt; &lt;br /&gt;The Committee headed by Retd. Justice BN Srikrishna released its Report and Draft Bill on the 27th of July, 2018. It comes at a time when there is increasing discussion about the individual privacy and surveillance by both private organisations and state authorities. Especially in light of the 9-judge Puttaswamy judgment affirming the Fundamental Right to Privacy, there was a need to concretise the right in the form of a statute. The Bill proposes an elaborate data protection framework by utilising concepts such as anonymisation, pseudonymisation, data localisation, guardian data fiduciary, among others. While the Bill has been lauded for providing a data protection framework largely similar to the one proposed by civil society, there are several areas of concern with the Bill such as the amendments suggested to the RTI Act, the impact of the Bill on Free Speech and the lack of substantial provisions regarding surveillance. There has been further criticism that the discussions regarding these issues have been conducted in silos, with little to no dialogue taking place between the various stakeholders and experts in the field.  &lt;br /&gt; &lt;br /&gt;We believe that there is a need to provide a common forum for these stakeholders to interact with each other in providing suggestions that are representative in nature and nuanced in their expression.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Themes&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Privacy and Free Speech - This interaction aims to examine the juxtaposition of the constitutional right to free speech and the now constitutionally affirmed right to privacy. Will a new data protection law impact the publication of leaked documents or sting operations like the Radia tapes or Tehelka’s ‘Operation Westend’? If so, how can journalists mitigate the risk of getting sued for breach of privacy? While the jurisprudence concerning the right to privacy is in its most nascent state, it becomes important for us to explore its contours in light of already established constitutional guarantees. &lt;br /&gt; &lt;br /&gt;Right to Information and Right to Privacy - How does the right to privacy impact the right to information? The guarantees of these two rights arise from diametrically opposite ideologies, in that privacy aims to shield from the public domain information and data concerning individuals and institutions while the right to information aims to promote transparency and disclosure of information held by the state. However, the question remains, is the existence of these two rights necessarily mutually exclusive? Will a new data protection law make it difficult to promote transparency under the Right to Information Act? Is there a possibility of a clash between the Information Commissions and the proposed Data Protection Authority? This panel would analyze the co-existence and competitive nature of these two rights in the context of the Indian legal space. &lt;br /&gt; &lt;br /&gt;Surveillance - As we move towards a form of governance that is increasingly capable of surveilling individual movements and actions, it becomes extremely necessary for us to understand the nature of surveillance. Can data privacy be compromised for surveillance that may be necessary for increased safety in our physical and virtual living spaces? Are there any provisions that protect data in cases of it becoming exploitable? What is the interaction of international statutes (like the ICCPR) and the latest Indian statute in terms of its recognition of the necessity of surveillance in contrast to the necessity of protection of data?&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/symposium-on-data-privacy-and-citizens-rights'&gt;https://cis-india.org/internet-governance/news/symposium-on-data-privacy-and-citizens-rights&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-09-18T15:18:37Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/blog/artificial-intelligence-in-the-governance-sector-in-india">
    <title>Artificial Intelligence in the Governance Sector in India</title>
    <link>https://cis-india.org/internet-governance/blog/artificial-intelligence-in-the-governance-sector-in-india</link>
    <description>
        &lt;b&gt;The use of Artificial Intelligence has the potential to ameliorate several existing structural inefficiencies in the discharge of governmental functions. Our research indicates that the deployment of this technology across sub-sectors is still on the horizon.&lt;/b&gt;
        &lt;p&gt;Ecosystem Mapping: Shweta Mohandas and Anamika Kundu&lt;br /&gt;Edited by: Amber Sinha, Pranav MB and Vishnu Ramachandran&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;Much of the technological capacity and funding for AI in governance in India is coming from the private sector - a trend we expect will continue as the government engages in an increasing number of partnerships with both start-ups and large corporations alike. While there is considerable enthusiasm and desire by the government to develop AI-driven solutions in governance, including the release of two reports identifying the broad contours of India’s AI strategy, this enthusiasm is yet to be underscored by adequate financial, infrastructural, and technological capacity. This gap provides India with a unique opportunity to understand some of the ethical, legal and technological hurdles faced by the West both during and after the implementation of similar technology and avoid these challenges when devising its own AI strategy and regulatory policy.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The case study identified five sub-sectors including law enforcement, education, defense, and the discharge of governmental functions, and also considered the implications of AI in judicial decision-making processes that have been used in the United States. After mapping the uses of AI in various sub-sectors, this report identifies several challenges to the deployment of this technology. These include factors such as infrastructural and technological capacity, particularly among key actors at the grassroots level, a lack of trust in AI-driven solutions, and inadequate funding. We also identified several ethical and legal concerns that policy-makers must grapple with. These include over-dependence on AI systems, privacy and security, assignment of liability, bias and discrimination both in process and outcome, transparency, and due process. Subsequently, this report can be considered a roadmap for the future of AI in India by tracking corresponding and emerging developments in other parts of the world. In the final section of the report, we propose several recommendations for policy-makers and developers that might address some of the challenges and ethical concerns identified. Some of these include benchmarks for the use of AI in the public sector, development of standards of explanation, a standard framework for engagement with the private sector, leveraging AI as a field to further India’s international strategy, developing adequate standards of data curation, ensuring that the benefits of the technology reach the lowest common denominator, adopting interdisciplinary approaches to the study of Artificial Intelligence, and developing fairness, transparency and due process through the contextual application of a rules-based system.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;It is crucial that policy-makers do not adopt a ‘one-size-fits-all’ approach to AI regulation but consider all options within a regulatory spectrum that considers the specific impacts of the deployment of this technology for each sub-sector within governance - with the distinction of public sector use. Given that the governance sector has potential implications for the fundamental rights of all citizens, it is also imperative that the government does not shy away from its obligation to ensure the fair and ethical deployment of this technology while also ensuring the existence of robust redress mechanisms. To do so, it must chart out a standard rules-based system that creates guidelines and standards for private sector development of AI solutions for the public sector. As with other emerging technologies, the success of Artificial Intelligence depends on whether it is deployed with the intention of placing greater regulatory scrutiny on the daily lives of individuals or of harnessing individual potential in ways that augment rather than counter the core tenets of constitutionalism and human dignity.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Read the full report &lt;a href="https://cis-india.org/internet-governance/ai-and-governance-case-study-pdf"&gt;here&lt;/a&gt;&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/blog/artificial-intelligence-in-the-governance-sector-in-india'&gt;https://cis-india.org/internet-governance/blog/artificial-intelligence-in-the-governance-sector-in-india&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Arindrajit Basu and Elonnai Hickok</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-09-14T11:37:58Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/blog/ai-in-india-a-policy-agenda">
    <title>AI in India: A Policy Agenda</title>
    <link>https://cis-india.org/internet-governance/blog/ai-in-india-a-policy-agenda</link>
    <description>
        &lt;b&gt;&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;&lt;a class="external-link" href="http://cis-india.org/internet-governance/files/ai-in-india-a-policy-agenda"&gt;Click&lt;/a&gt; to download the file&lt;/p&gt;
&lt;hr style="text-align: justify; " /&gt;
&lt;h1 style="text-align: justify; "&gt;Background&lt;/h1&gt;
&lt;p style="text-align: justify; "&gt;Over the last few months, the Centre for Internet and Society has been engaged in mapping the use and impact of artificial intelligence in the health, banking, manufacturing, and governance sectors in India through the development of a case study compendium.&lt;a href="#_ftn1" name="_ftnref1"&gt;&lt;sup&gt;&lt;sup&gt;[1]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Alongside this research, we are examining the impact of Industry 4.0 on jobs and employment and questions related to the future of work in India. We have also been a part of several global conversations on artificial intelligence and autonomous systems. The Centre for Internet and Society is part of the Partnership on Artificial Intelligence, a consortium which has representation from some of the most important companies and civil society organisations involved in developments and research on artificial intelligence. We have contributed to The IEEE Global Initiative on Ethics of Autonomous and Intelligent Systems, and are also a part of the Big Data for Development Global Network, where we are undertaking research towards evolving ethical principles for the use of computational techniques. The following are a set of recommendations we have arrived at through our research into artificial intelligence, particularly the sectoral case studies focussed on the development and use of artificial intelligence in India.&lt;/p&gt;
&lt;h1 style="text-align: justify; "&gt;National AI Strategies: A Brief Global Overview&lt;/h1&gt;
&lt;p style="text-align: justify; "&gt;Artificial Intelligence is emerging as a central policy issue in several countries. In October 2016, the Obama White House released a report titled “Preparing for the Future of Artificial Intelligence”&lt;a href="#_ftn2" name="_ftnref2"&gt;&lt;sup&gt;&lt;sup&gt;[2]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; delving into a range of issues including application for public goods, regulation, economic impact, global security and fairness issues. The White House also released a companion document called the “National Artificial Intelligence Research and Development Strategic Plan”&lt;a href="#_ftn3" name="_ftnref3"&gt;&lt;sup&gt;&lt;sup&gt;[3]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; which laid out a strategic plan for Federally-funded research and development in AI. These were the first of a series of policy documents released by the US towards the role of AI. The United Kingdom announced its 2020 national development strategy and issued a government report to accelerate the application of AI by government agencies, while in 2018 the Department for Business, Energy, and Industrial Strategy released the Policy Paper - AI Sector Deal.&lt;a href="#_ftn4" name="_ftnref4"&gt;&lt;sup&gt;&lt;sup&gt;[4]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; The Japanese government released its paper on Artificial Intelligence Technology Strategy in 2017.&lt;a href="#_ftn5" name="_ftnref5"&gt;&lt;sup&gt;&lt;sup&gt;[5]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; The European Union launched "SPARC," the world’s largest civilian robotics R&amp;amp;D program, back in 2014.&lt;a href="#_ftn6" name="_ftnref6"&gt;&lt;sup&gt;&lt;sup&gt;[6]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Over the last year and a half, Canada,&lt;a href="#_ftn7" name="_ftnref7"&gt;&lt;sup&gt;&lt;sup&gt;[7]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; China,&lt;a href="#_ftn8" name="_ftnref8"&gt;&lt;sup&gt;&lt;sup&gt;[8]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; the UAE,&lt;a href="#_ftn9" name="_ftnref9"&gt;&lt;sup&gt;&lt;sup&gt;[9]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Singapore,&lt;a href="#_ftn10" name="_ftnref10"&gt;&lt;sup&gt;&lt;sup&gt;[10]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; South Korea&lt;a href="#_ftn11" name="_ftnref11"&gt;&lt;sup&gt;&lt;sup&gt;[11]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;, and France&lt;a href="#_ftn12" name="_ftnref12"&gt;&lt;sup&gt;&lt;sup&gt;[12]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; have announced national AI strategy documents while 24 member States in the EU have committed to develop national AI policies that reflect a “European” approach to AI &lt;a href="#_ftn13" name="_ftnref13"&gt;&lt;sup&gt;&lt;sup&gt;[13]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;. Other countries such as Mexico and Malaysia are in the process of evolving their national AI strategies. What this suggests is that AI is quickly emerging as central to national plans around the development of science and technology as well as economic and national security and development. There is also a focus on investments enabling AI innovation in critical national domains as a means of addressing key challenges facing nations. India has followed this trend and in 2018 the government published two AI roadmaps - the Report of Task Force on Artificial Intelligence by the AI Task Force constituted by the Ministry of Commerce and Industry&lt;a href="#_ftn14" name="_ftnref14"&gt;&lt;sup&gt;&lt;sup&gt;[14]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; and the National Strategy for Artificial Intelligence by Niti Aayog.&lt;a href="#_ftn15" name="_ftnref15"&gt;&lt;sup&gt;&lt;sup&gt;[15]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Some of the key themes running across the National AI strategies globally are spelt out below.&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;Economic Impact of AI&lt;/h2&gt;
&lt;p style="text-align: justify; "&gt;A common thread that runs across the different national approaches to AI is the belief in the significant economic impact of AI, that it will likely increase productivity and create wealth. The British government estimated that AI could add $814 billion to the UK economy by 2035. The UAE report states that by 2031, AI will help boost the country’s GDP by 35 per cent and reduce government costs by 50 per cent. Similarly, China estimates that the core AI market will be worth 150 billion RMB ($25bn) by 2020, 400 billion RMB ($65bn) and one trillion RMB ($160bn) by 2030. The impact of the adoption of AI and automation on labour and employment is also a key theme touched upon across the strategies. For instance, the White House Report of October 2016 states that the US workforce is unprepared – and that a serious education programme, through online courses and in-house schemes, will be required.&lt;a href="#_ftn16" name="_ftnref16"&gt;&lt;sup&gt;&lt;sup&gt;[16]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;State Funding&lt;/h2&gt;
&lt;p style="text-align: justify; "&gt;Another key trend exhibited in all national strategies towards AI has been a commitment by the respective governments towards supporting research and development in AI. The French government has stated that it intends to invest €1.5 billion ($1.85 billion) in AI research in the period through to 2022. The British government’s recommendations, in late 2017, were followed swiftly by a promise in the autumn budget of new funds, including at least £75 million for AI. Similarly, the Canadian government put together a $125-million ‘pan-Canadian AI strategy’ last year.&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;AI for Public Good&lt;/h2&gt;
&lt;p style="text-align: justify; "&gt;The use of AI for Public Good is a significant focus of most AI policies. The biggest justification for AI innovation as a legitimate objective of public policy is its promised impact on improving people’s lives by helping to solve some of the world’s greatest challenges and inefficiencies, and its potential to emerge as a transformative technology, much like mobile computing. These public good uses of AI are emerging across sectors such as transportation, migration, law enforcement and the justice system, education, and agriculture.&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;National Institutions leading AI research&lt;/h2&gt;
&lt;p style="text-align: justify; "&gt;Another important trend which is key to the implementation of national AI strategies is the creation or development of well-funded centres of excellence which would serve as drivers of research and development and leverage synergies with the private sector. The French Institute for Research in Computer Science and Automation (INRIA) plans to create a national AI research program with five industrial partners. In the UK, the Alan Turing Institute is likely to emerge as the national institute for data science, and an AI Council would be set up to manage inter-sector initiatives and training. In Canada, the Canadian Institute for Advanced Research (CIFAR) has been tasked with implementing the national AI strategy. Countries like Japan have a less centralised structure, with the creation of a ‘strategic council for AI technology’ to promote research and development in the field, and manage a number of key academic institutions, including NEDO and its national ICT (NICT) and science and tech (JST) agencies. These institutions are key to the successful implementation of national agendas and policies around AI.&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;AI, Ethics and Regulation&lt;/h2&gt;
&lt;p style="text-align: justify; "&gt;Across the AI strategies, ethical dimensions and regulation of AI were highlighted as concerns that needed to be addressed. Algorithmic transparency and explainability, clarity on liability, accountability and oversight, bias and discrimination, and privacy are ethical and regulatory questions that have been raised. Employment and the future of work is another area of focus that has been identified by countries. For example, the US 2016 Report reflected on whether existing regulation is adequate to address risk or whether adaptation is needed, by examining the use of AI in automated vehicles. In the policy paper - AI Sector Deal - the UK proposes four grand challenges: AI and Data Economy, Future Mobility, Clean Growth, and Ageing Society. The Pan-Canadian Artificial Intelligence Strategy focuses on developing global thought leadership on the economic, ethical, policy, and legal implications of advances in artificial intelligence.&lt;a href="#_ftn17" name="_ftnref17"&gt;&lt;sup&gt;&lt;sup&gt;[17]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The above are important factors and trends to take into account, and to different extents they have been reflected in the two national roadmaps for AI. Without adequate institutional planning, there is a risk of national strategies being too monolithic in nature. Without sufficient supporting mechanisms - in the form of national institutions which would drive AI research and innovation, capacity building and re-skilling of the workforce to adapt to changing technological trends, building regulatory capacity to address new and emerging issues which may disrupt traditional forms of regulation and, finally, the creation of an environment of monetary support from both the public and private sector - it becomes difficult to implement a national strategy and actualize the potential of AI. As stated above, there is also a need for the identification of key national policy problems which can be addressed by the use of AI, and the creation of a framework with institutional actors to articulate the appropriate plan of action to address the problems using AI. There are several ongoing global initiatives which are in the process of trying to articulate key principles for ethical AI. These discussions also feature in some of the national strategy documents.&lt;/p&gt;
&lt;h1 style="text-align: justify; "&gt;Key considerations for AI policymaking in India&lt;/h1&gt;
&lt;p style="text-align: justify; "&gt;As mentioned above, India has published two national AI strategies. We have responded to both of these here&lt;a href="#_ftn18" name="_ftnref18"&gt;&lt;sup&gt;&lt;sup&gt;[18]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; and here.&lt;a href="#_ftn19" name="_ftnref19"&gt;&lt;sup&gt;&lt;sup&gt;[19]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Beyond these two roadmaps, this policy brief reflects on a number of factors that need to come together for India to leverage and adopt AI across sectors, communities, and technologies successfully.&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;Resources, Infrastructure, Markets, and Funding&lt;/h2&gt;
&lt;h3 style="text-align: justify; "&gt;&lt;b&gt;Ensure adequate government funding and investment in R&amp;amp;D&lt;/b&gt;&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;As mentioned above, a survey of all major national strategies on AI reveals a significant financial commitment from governments towards research and development surrounding AI. Most strategy documents speak of the need to safeguard national ambitions in the race for AI development. In order to do so it is imperative to have a national strategy for AI research and development, identification of nodal agencies to enable the process, and creation of institutional capacity to carry out cutting edge research.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Most jurisdictions such as Japan, the UK and China have discussed collaborations between industry and government to ensure greater investment into AI research and development. The European Union has spoken of using the existing public-private partnerships, particularly in robotics and big data, to boost investment by over one and a half times.&lt;a href="#_ftn20" name="_ftnref20"&gt;&lt;sup&gt;&lt;sup&gt;[20]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; To some extent, this step has been initiated by the Niti Aayog strategy paper. The paper lists out enabling factors for the widespread adoption of AI and maps out specific government agencies and ministries that could promote such growth. In February 2018, the Ministry of Electronics and IT also set up four committees to prepare a roadmap for a national AI programme. The four committees are presently studying AI in the context of citizen-centric services; data platforms; skilling, reskilling and R&amp;amp;D; and legal, regulatory and cybersecurity perspectives.&lt;a href="#_ftn21" name="_ftnref21"&gt;&lt;sup&gt;&lt;sup&gt;[21]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;&lt;b&gt;Democratize AI technologies and data&lt;/b&gt;&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;Clean, accurate, and appropriately curated data is essential for training algorithms. Importantly, large quantities of data alone do not translate into better results. Accuracy and curation of data should be prerequisites to quantity of data. Frameworks to generate and access larger quantities of data should not hinge on models of centralized data stores. The government and the private sector are generally gatekeepers to vast amounts of data and technologies. Ryan Calo has called this an issue of data parity,&lt;a href="#_ftn22" name="_ftnref22"&gt;&lt;sup&gt;&lt;sup&gt;[22]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; where only a few well-established leaders in the field have the ability to acquire data and build datasets. Gaining access to data comes with its own questions of ownership, privacy, security, accuracy, and completeness. There are a number of different approaches and techniques that can be adopted to enable access to data.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Open Government Data &lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;Robust open data sets are one way in which access can be enabled. Open data is particularly important for small start-ups as they build prototypes. Even though India is a data-dense country and has in place a National Data and Accessibility Policy, India does not yet have robust and comprehensive open data sets across sectors and fields. Our research found that this stands as an obstacle to innovation in the Indian context, as startups often turn to open datasets in the US and Europe for developing prototypes. Yet, this is problematic because the demography represented in the data set is significantly different, resulting in the development of solutions that are trained to a specific demographic, and thus need to be re-trained on Indian data. Although AI is technology-agnostic, for many data analysis use cases demographically different training data is not ideal. This is particularly true for certain categories such as health, employment, and financial data.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The government can play a key role in providing access to datasets that will help the functioning and performance of AI technologies. The Indian government has already made a move towards accessible datasets through the Open Government Data Platform which provides access to a range of data collected by various ministries. Telangana has developed its own Open Data Policy which has stood out for its transparency and the quality of data collected and helps build AI based solutions.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;In order to encourage and facilitate innovation, the central and state governments need to actively pursue and implement the National Data and Accessibility Policy.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Access to Private Sector Data &lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;The private sector is the gatekeeper to large amounts of data. There is a need to explore different models of enabling access to private sector data while ensuring and protecting users rights and company IP. This data is often considered as a company asset and not shared with other stakeholders. Yet, this data is essential in enabling innovation in AI.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Amanda Levendowski states that ML practitioners have essentially three options in securing sufficient data— build the databases themselves, buy the data, or use data in the public domain. The first two alternatives are largely available to big firms or institutions. Smaller firms often end resorting to the third option but it carries greater risks of bias.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;A solution could be federated access, with companies allowing access to researchers and developers to encrypted data without sharing the actual data.  Another solution that has been proposed is ‘watermarking’ data sets.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Data sandboxes have been promoted as tools for enabling innovation while protecting privacy, security etc. Data sandboxes allow companies access to large anonymized data sets under controlled circumstances. A regulatory sandbox is a controlled environment with relaxed regulations that allow the product to be tested thoroughly before it is launched to the public. By providing certification and safe spaces for testing, the government will encourage innovation in this sphere. This system has already been adopted in Japan where there are AI specific regulatory sandboxes to drive society 5.0.160 data sandboxes are tools that can be considered within specific sectors to enable innovation. A sector wide data sandbox was also contemplated by TRAI.&lt;a href="#_ftn23" name="_ftnref23"&gt;&lt;sup&gt;&lt;sup&gt;[23]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; A sector specific governance structure can establish a system of ethical reviews of underlying data used to feed the AI technology along with data collected in order to ensure that this data is complete, accurate and has integrity. A similar system has been developed by Statistics Norway and the Norwegian Centre for Research Data.&lt;a href="#_ftn24" name="_ftnref24"&gt;&lt;sup&gt;&lt;sup&gt;[24]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;AI Marketplaces&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;The National Roadmap for Artificial Intelligence by NITI Aayog proposes the creation of a National AI marketplace that is comprised of a data marketplace, data annotation marketplace, and deployable model marketplace/solutions marketplace.&lt;a href="#_ftn25" name="_ftnref25"&gt;&lt;sup&gt;&lt;sup&gt;[25]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; In particular, it is envisioned that the data marketplace would be based on blockchain technology and have the features of: traceability, access controls, compliance with local and international regulations, and robust price discovery mechanism for data. Other questions that will need to be answered center around pricing and ensuring equal access. It will also be interesting how the government incentivises the provision of data by private sector companies. Most data marketplaces that are emerging are initiated by the private sector.&lt;a href="#_ftn26" name="_ftnref26"&gt;&lt;sup&gt;&lt;sup&gt;[26]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; A government initiated marketplace has the potential to bring parity to some of the questions raised above, but it should be strictly limited to private sector data in order to not replace open government data.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Open Source Technology &lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;A number of companies are now offering open source AI technologies. For example, TensorFlow, Keras, Scikit-learn, Microsoft Cognitive Toolkit, Theano, Caffe, Torch, and Accord.NET.&lt;a href="#_ftn27" name="_ftnref27"&gt;&lt;sup&gt;&lt;sup&gt;[27]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; The government should incentivise and promote open source AI technologies towards harnessing and accelerating research in AI.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;&lt;b&gt;Re-thinking Intellectual Property Regimes &lt;/b&gt;&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;Going forward it will be important for the government to develop an intellectual property framework that encourages innovation. AI systems are trained by reading, viewing, and listening to copies of human-created works. These resources such as books, articles, photographs, films, videos, and audio recordings are all key subjects of copyright protection. Copyright law grants exclusive rights to copyright owners, including the right to reproduce their works in copies, and one who violates one of those exclusive rights “is an infringer of copyright.&lt;a href="#_ftn28" name="_ftnref28"&gt;&lt;sup&gt;&lt;sup&gt;[28]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The enterprise of AI is, to this extent, designed to conflict with tenets of copyright law, and after the attempted ‘democratization’ of copyrighted content by the advent of the Internet, AI poses the latest challenge to copyright law. At the centre of this challenge is the fact that it remains an open question whether a copy made to train AI is a “copy” under copyright law, and consequently whether such a copy is an infringement.&lt;a href="#_ftn29" name="_ftnref29"&gt;&lt;sup&gt;&lt;sup&gt;[29]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; The fractured jurisprudence on copyright law is likely to pose interesting legal questions with newer use cases of AI. For instance, Google has developed a technique called federated learning, popularly referred to as on-device ML, in which training data is localised to the originating mobile device rather than copying data to a centralized server.&lt;a href="#_ftn30" name="_ftnref30"&gt;&lt;sup&gt;&lt;sup&gt;[30]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; The key copyright questions here is whether decentralized training data stored in random access memory (RAM) would be considered as “copies”.&lt;a href="#_ftn31" name="_ftnref31"&gt;&lt;sup&gt;&lt;sup&gt;[31]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; There are also suggestions that copies made for the purpose of training of machine learning systems may be so trivial or de minimis that they may not qualify as infringement.&lt;a href="#_ftn32" name="_ftnref32"&gt;&lt;sup&gt;&lt;sup&gt;[32]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; For any industry to flourish, there needs to be legal and regulatory clarity and it is imperative that these copyright questions emerging out of use of AI be addressed soon.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;As noted in our response to the Niti Aayog national AI strategy  “&lt;i&gt;The report also blames the current Indian  Intellectual Property regime for being “unattractive” and averse to incentivising research and adoption of AI. Section 3(k) of Patents Act exempts algorithms from being patented, and the Computer Related Inventions (CRI) Guidelines have faced much controversy over the patentability of mere software without a novel hardware component. The paper provides no concrete answers to the question of whether it should be permissible to patent algorithms, and if yes, to  to what extent. Furthermore, there needs to be a standard either in the CRI Guidelines or the Patent Act, that distinguishes between AI algorithms and non-AI algorithms. Additionally, given that there is no historical precedence on the requirement of patent rights to incentivise creation of AI,  innovative investment protection mechanisms that have lesser negative externalities, such as compensatory liability regimes would be more desirable.  The report further failed to look at the issue holistically and recognize that facilitating rampant patenting can form a barrier to smaller companies from using or developing  AI. This is important to be cognizant of given the central role of startups to the AI ecosystem in India and because it can work against the larger goal of inclusion articulated by the report.”&lt;a href="#_ftn33" name="_ftnref33"&gt;&lt;sup&gt;&lt;b&gt;&lt;sup&gt;[33]&lt;/sup&gt;&lt;/b&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/i&gt;&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;&lt;b&gt;National infrastructure to support domestic development &lt;/b&gt;&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;Building a robust national Artificial Intelligence solution requires establishing adequate indigenous  infrastructural capacity for data storage and processing.  While this should not necessarily extend to mandating data localisation as the draft privacy bill has done, capacity should be developed to store data sets generated by indigenous nodal points.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;AI Data Storage &lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;Capacity needs to increase as the volume of data that needs to be processed in India increases. This includes ensuring effective storage capacity, IOPS (Input/Output per second) and ability to process massive amounts of data.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;AI Networking Infrastructure&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;Organizations will need to upgrade their networks in a bid to upgrade and optimize efficiencies of scale. Scalability must be undertaken on a high priority which will require a high-bandwidth, low latency and creative architecture, which requires appropriate last mile data curation enforcement.&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;Conceptualization and Implementation&lt;/h2&gt;
&lt;h3 style="text-align: justify; "&gt;&lt;b&gt;Awareness, Education, and Reskilling &lt;/b&gt;&lt;/h3&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Encouraging AI research&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;This can be achieved by collaborations between the government and large companies to promote accessibility and encourage innovation through greater R&amp;amp;D spending. The Government of Karnataka, for instance, is collaborating with NASSCOM to set up a Centre of Excellence for Data Science and Artificial Intelligence (CoE-DS&amp;amp;AI) on a public-private partnership model to “accelerate the ecosystem in Karnataka by providing the impetus for the development of data science and artificial intelligence across the country.” Similar centres could be incubated in hospitals and medical colleges in India.  Principles of public funded research such as FOSS, open standards, and open data should be core to government initiatives to encourage research.  The Niti Aaayog report proposes a two tier integrated approach towards accelerating research, but is currently silent on these principles.&lt;a href="#_ftn34" name="_ftnref34"&gt;&lt;sup&gt;&lt;sup&gt;[34]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Therefore,as suggested by the NITI AAYOG Report, the government needs to set up ‘centres of excellence’. Building upon the stakeholders identified in the NITI AAYOG Report, the centers of excellence should  involve a wide range of experts including lawyers, political philosophers, software developers, sociologists and gender studies from diverse organizations including government, civil society,the private sector and research institutions  to ensure the fair and efficient roll out of the technology.&lt;a href="#_ftn35" name="_ftnref35"&gt;&lt;sup&gt;&lt;sup&gt;[35]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; An example is the Leverhulme Centre for the Future of Intelligence set up by the Leverhulme Foundation at the University of Cambridge&lt;a href="#_ftn36" name="_ftnref36"&gt;&lt;sup&gt;&lt;sup&gt;[36]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; and the AI Now Institute at New York University (NYU)&lt;a href="#_ftn37" name="_ftnref37"&gt;&lt;sup&gt;&lt;sup&gt;[37]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; These research centres bring together a wide range of experts from all over the globe.&lt;a href="#_ftn38" name="_ftnref38"&gt;&lt;sup&gt;&lt;sup&gt;[38]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Skill sets to successfully adopt AI&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;Educational institutions should provide opportunities for students to skill themselves to adapt to adoption of AI, and also push for academic programmes around AI. It is also important to introduce computing technologies such as AI in medical schools in order to equip doctors to adopt the technical skill sets and ethics required to use integrate AI in their practices. Similarly, IT institutes could include courses on ethics, privacy, accountability etc. to equip engineers and developers with an understanding of the questions surrounding the technology and services they are developing.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Societal Awareness Building&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;Much of the discussion around skilling for AI is in the context of the workplace, but there is a need for awareness to be developed across society for a broader adaptation to AI. The Niti Aayog report takes the first steps towards this - noting the importance of highlighting the benefits of AI to the public. The conversation needs to go beyond this towards enabling individuals to recognize and adapt to changes that might be brought about - directly and indirectly - by AI - inside and outside of the workplace. This could include catalyzing a shift in mindset to life long learning and discussion around potential implications of human-machine interactions.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Early Childhood Awareness and Education &lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;It is important that awareness around AI begins in early childhood. This is  in part because children already interact with AI and increasingly will do so and thus awareness is needed in how AI works and can be safely and ethically used. It is also important to start building the skills that will be necessary in an AI driven society from a young age.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Focus on marginalised groups &lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;Awareness, skills, and education should be targeted at national minorities including rural communities, the disabled, and women. Further, there should be a concerted  focus on communities that are under-represented in the tech sector-such as women and sexual minorities-to ensure that the algorithms themselves and the community working on AI driven solutions are holistic and cohesive. For example, Iridescent focuses on girls, children, and families to enable them to adapt to changes like artificial intelligence through promoting curiosity, creativity, and perseverance to become lifelong learners.&lt;a href="#_ftn39" name="_ftnref39"&gt;&lt;sup&gt;&lt;sup&gt;[39]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; This will be important towards ensuring that AI does not deepen societal  and global inequalities including digital divides. Widespread use of AI will undoubtedly require re-skilling various stakeholders in order to make them aware of the prospects of AI.&lt;a href="#_ftn40" name="_ftnref40"&gt;&lt;sup&gt;&lt;sup&gt;[40]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Artificial Intelligence itself can be used as a resource in the re-skilling process itself-as it would be used in the education sector to gauge people’s comfort with the technology and plug necessary gaps.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Improved access to and awareness of Internet of Things&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;The development of smart content or Intelligent Tutoring Systems in the education can only be done on a large scale if both the teacher and the student has access to and feel comfortable with using basic IoT devices . A U.K. government report has suggested that any skilled workforce  using AI should be a mix of those with a basic understanding responsible for implementation at the grassroots level , more informed users and specialists with advanced development and implementation skills.&lt;a href="#_ftn41" name="_ftnref41"&gt;&lt;sup&gt;&lt;sup&gt;[41]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;The same logic applies to the agriculture sector, where the government is looking to develop smart weather-pattern tracking applications. A potential short-term solution may lie in ensuring that key actors have access to an  IoT device so that he/she may access digital and then impart the benefits of access to proximate individuals. In the education sector, this would involve ensuring that all teachers have access to and are competent in using an IoT device. In the agricultural sector, this may involve equipping each village with a set of IoT devices so that the information can be shared among concerned individuals. Such an approach recognizes that AI is not the only technology catalyzing change - for example industry 4.0 is understood as  comprising of a suite of technologies including but not limited to AI.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Public Discourse&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;As solutions bring together and process vast amounts of granular data, this data can be from a variety of public and private sources - from third party sources or generated by the AI and its interaction with its environment. This means that very granular and non traditional data points are now going into decision making processes. Public discussion is needed to understand social and cultural norms and standards and how these might translate into acceptable use norms for data in various sectors.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;&lt;b&gt;Coordination and collaboration across stakeholders &lt;/b&gt;&lt;/h3&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Development of Contextually Nuanced and Appropriate AI Solutions &lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;Towards ensuring effectiveness and  accuracy it is important that solutions used in India are developed to account for cultural nuances and diversity. From our research this could be done in a number of ways ranging from: training AI solutions used in health on data from Indian patients to account for differences in demographics&lt;a href="#_ftn42" name="_ftnref42"&gt;&lt;sup&gt;&lt;sup&gt;[42]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;,  focussing on  natural language voice recognition to account for the diversity in languages and digital skills in the Indian context,&lt;a href="#_ftn43" name="_ftnref43"&gt;&lt;sup&gt;&lt;sup&gt;[43]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; and developing and applying AI to reflect societal norms and understandings.&lt;a href="#_ftn44" name="_ftnref44"&gt;&lt;sup&gt;&lt;sup&gt;[44]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Continuing, deepening, and expanding  partnerships for innovation&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;Continued innovation while holistically accounting for the challenges that AI poses  will be key for actors in the different sectors to remain competitive. As noted across case study reports partnerships is key in  facilitating this innovation and filling capacity gaps. These partnerships can be across sectors, institutions, domains, geographies, and stakeholder groups. For example:  finance/ telecom, public/private, national/international, ethics/software development/law, and academia/civil society/industry/government.  We would emphasize collaboration between actors across different domains and stakeholder groups as developing holistics AI solutions demands multiple understandings and perspectives.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Coordinated Implementation&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;Key sectors in India need to  begin to take steps to consider sector wide coordination in implementing AI. Potential stress and system wide vulnerabilities would need to be considered when undertaking this. Sectoral regulators such as RBI, TRAI, and the Medical Council of India are ideally placed to lead this coordination.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Develop contextual standard benchmarks to assess quality of algorithms&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;In part because of the nacency of the development and implementation of AI,  towards enabling effective assessments of algorithms to understand impact and informing selection by institutions adopting solutions, standard benchmarks can help in assessing quality and appropriateness of algorithms. It may be most effective to define such benchmarks at a sectoral level (finance etc.) or by technology and solution (facial recognition etc.).  Ideally, these efforts would be led by the government in collaboration with multiple stakeholders.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Developing a framework for working with the private sector for use-cases by the government&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;There are various potential use cases the government could adopt in order to use AI as a tool for augmenting public service delivery  in India by the government. However, given lack of capacity -both human resource and technological-means that entering into partnerships with the private sector may enable more fruitful harnessing of AI- as has been seen with existing MOUs in the agricultural&lt;a href="#_ftn45" name="_ftnref45"&gt;&lt;sup&gt;&lt;sup&gt;[45]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; and healthcare sectors.&lt;a href="#_ftn46" name="_ftnref46"&gt;&lt;sup&gt;&lt;sup&gt;[46]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; However, the partnership must be used as a means to build capacity within the various nodes in the set-up rather than relying  only on  the private sector partner to continue delivering sustainable solutions.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Particularly, in the case of use of AI for governance, there is a need to evolve a clear parameter to do impact assessment prior to the deployment of the technology that clearly tries to map estimated impact of the technology of clearly defined objectives, which must also include the due process, procedural fairness and human rights considerations . As per Article 12 of the Indian Constitution, whenever the government is exercising a public function, it is bound by the entire gamut of fundamental rights articulated in Part III of the Constitution. This is a crucial consideration the government will have to bear in mind whenever it uses AI-regardless of the sector.  In all cases of public service delivery, primary accountability for the use of AI should lie with the government itself, which means that a cohesive and uniform framework which regulates these partnerships must be conceptualised. This framework should incorporate : (a) Uniformity in the wording and content of contracts that the government signs, (b) Imposition of obligations of transparency and accountability on the developer to ensure that the solutions developed are in conjunction with constitutional standards and (c) Continuous evaluation of private sector developers by the government and experts to ensure that they are complying with their obligations.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Defining Safety Critical AI&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;The implications of AI differs according to use. Some countries, such as the EU, are beginning to define sectors where AI should play the role of augmenting jobs as opposed to functioning autonomously. The Global Partnership on AI is has termed sectors where AI tools supplement or replace human decision making in areas such as health and transportation as ‘safety critical AI’ and is  researching best practices for application of AI in these areas.  India will need to think through if there is a threshold that needs to be set and more stringent regulation applied. In addition to uses in health and transportation, defense and law enforcement would be another sector where certain use would require more stringent regulation.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Appropriate certification mechanisms&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;Appropriate certificate mechanisms will be important in ensuring the quality of AI solutions.   A significant barrier to the adoption of AI  in some sectors  in India is acceptability of results, which include direct results arrived at using AI technologies as well as opinions provided by practitioners that are influenced/aided by AI technologies. For instance, start-ups in the healthcare sectors often find that they are asked to show proof of a clinical trial when presenting their products to doctors and hospitals, yet clinical trials are expensive, time consuming and inappropriate forms of certification for medical devices and digital health platforms. Startups also face difficulty in conducting clinical trials as there is lack of a clear regulation to adhere to. They believe that while clinical trials are a necessity with respect to drugs, the process often results in obsolescence of the technology by the time it is approved in the context of AI. Yet, medical practitioners are less trusting towards startups who do not have approval from a national or international authority. A possible and partial solution suggested by these startups is to enable doctors to partner with them to conduct clinical trials together. However, such partnerships cannot be at the expense of rigour, and adequate protections need to be built in the enabling regulation.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Serving as a voice for emerging economies in the global debate on AI&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;While India should utilise Artificial Intelligence in the economy as a means of occupying a driving role in the global debate around AI, it must be cautious before allowing the use of Indian territory and infrastructure as a test bed for other emerging economies without considering the ramifications that the utilisation of AI may have for Indian citizens. The NITI AAYOG Report envisions  India as leverage AI as a ‘garage’ for emerging economies.&lt;a href="#_ftn47" name="_ftnref47"&gt;&lt;sup&gt;&lt;sup&gt;[47]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; While there are certain positive connotations of this suggestion in so far as this propels India to occupy a leadership position-both technically and normatively in determining future use cases for AI in India,, in order to ensure that Indian citizens are not used as test subjects in this process, guiding principles could be developed such as requiring that projects have clear benefits for India.&lt;/p&gt;
&lt;h2 style="text-align: justify; "&gt;Frameworks for Regulation&lt;/h2&gt;
&lt;h3 style="text-align: justify; "&gt;&lt;b&gt;National legislation&lt;/b&gt;&lt;/h3&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Data Protection Law&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;India is a data-dense country, and the lack of a robust privacy  regime, allows the public and private sector easier access to large amounts of data than might be found in other contexts with stringent privacy laws. India also lacks a formal regulatory regime around anonymization. In our research we found that this gap does not always translate into a gap in practice, as some start up companies have  adopted  self-regulatory practices towards protecting privacy such as of anonymising data they receive before using it further, but it does result in unclear and unharmonized practice..&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;In order to ensure rights and address emerging challenges to the same posed by artificial intelligence, India needs to enact   a comprehensive privacy legislation applicable to the private and public sector to regulate the use of data, including use in artificial intelligence. A privacy legislation will also have to address more complicated questions such as the use of publicly available data for training algorithms, how traditional data categories (PI vs. SPDI - meta data vs. content data etc.) need to be revisited in light of AI,  and how can a privacy legislation be applied to autonomous decision making. Similarly, surveillance laws may need to be revisited in light of AI driven technologies such as facial recognition, UAS, and self driving cars as they provide new means of surveillance to the state and have potential implications for other rights such as the right to freedom of expression and the right to assembly.  Sectoral protections can compliment and build upon the baseline protections articulated in a national privacy legislation.&lt;a href="#_ftn48" name="_ftnref48"&gt;&lt;sup&gt;&lt;sup&gt;[48]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; In August 2018 the Srikrishna Committee released a draft data protection bill for India. We have reflected on how the Bill addresses AI. Though the Bill brings under its scope companies deploying emerging technologies and subjects them to the principles of privacy by design and data impact assessments, the Bill is silent on key rights and responsibilities, namely the responsibility of the data controller to explain the logic and impact of automated decision making including profiling to data subjects and the right to opt out of automated decision making in defined circumstances.&lt;a href="#_ftn49" name="_ftnref49"&gt;&lt;sup&gt;&lt;sup&gt;[49]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Further, the development of technological solutions to address the dilemma between AI and the need for access to larger quantities of data for multiple purposes and privacy should be emphasized.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Discrimination Law&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;A growing area of research globally is the social consequences of AI with a particular focus on its tendency to replicate or amplify existing and structural inequalities. Problems such as data invisibility of certain excluded groups,&lt;a href="#_ftn50" name="_ftnref50"&gt;&lt;sup&gt;&lt;sup&gt;[50]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; the myth of data objectivity and neutrality,&lt;a href="#_ftn51" name="_ftnref51"&gt;&lt;sup&gt;&lt;sup&gt;[51]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; and data monopolization&lt;a href="#_ftn52" name="_ftnref52"&gt;&lt;sup&gt;&lt;sup&gt;[52]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; contribute to the disparate impacts of big data and AI. So far much of the research on this subject has not moved beyond the exploratory phase as is reflected in the reports released by the White House&lt;a href="#_ftn53" name="_ftnref53"&gt;&lt;sup&gt;&lt;sup&gt;[53]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; and Federal Trade Commission&lt;a href="#_ftn54" name="_ftnref54"&gt;&lt;sup&gt;&lt;sup&gt;[54]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; in the United States. The biggest challenge in addressing discriminatory and disparate impacts of AI is ascertaining “where value-added personalization and segmentation ends and where harmful discrimination begins.”&lt;a href="#_ftn55" name="_ftnref55"&gt;&lt;sup&gt;&lt;sup&gt;[55]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Some prominent cases where AI can have discriminatory impact are denial of loans based on attributes such as neighbourhood of residence as a proxies which can be used to circumvent anti-discrimination laws which prevent adverse determination on the grounds of race, religion, caste or gender, or adverse findings by predictive policing against persons who are unfavorably represented in the structurally biased datasets used by the law enforcement agencies. There is a dire need for disparate impact regulation in sectors which see the emerging use of AI.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Similar to disparate impact regulation, developments in AI, and its utilisation, especially in credit rating, or risk assessment processes could create complex problems that cannot be solved only by the principle based regulation. Instead, regulation intended specifically to avoid outcomes that the regulators feel are completely against the consumer, could be an additional tool that increases the fairness, and effectiveness of the system.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Competition Law&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;The conversation of use of competition or antitrust laws to govern AI is still at an early stage. However, the emergence of numerous data driven mergers or acquisitions such as Yahoo-Verizon, Microsoft-LinkedIn and Facebook-WhatsApp have made it difficult to ignore the potential role of competition law in the governance of data collection and processing practices. It is important to note that the impact of Big Data goes far beyond digital markets and the mergers of companies such as Bayer, Climate Corp and Monsanto shows that data driven business models can also lead to the convergence of companies from completely different sectors as well. So far, courts in Europe have looked at questions such as the impact of combination of databases on competition&lt;a href="#_ftn56" name="_ftnref56"&gt;&lt;sup&gt;&lt;sup&gt;[56]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; and have held that in the context of merger control, data can be a relevant question if an undertaking achieves a dominant position through a merger, making it capable of gaining further market power through increased amounts of customer data. The evaluation of the market advantages of specific datasets has already been done in the past, and factors which have been deemed to be relevant have included whether the dataset could be replicated under reasonable conditions by competitors and whether the use of the dataset was likely to result in a significant competitive advantage.&lt;a href="#_ftn57" name="_ftnref57"&gt;&lt;sup&gt;&lt;sup&gt;[57]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; However, there are limited circumstances in which big data meets the four traditional criteria for being a barrier to entry or a source of sustainable competitive advantage — inimitability, rarity, value, and non-substitutability.&lt;a href="#_ftn58" name="_ftnref58"&gt;&lt;sup&gt;&lt;sup&gt;[58]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Any use of competition law to curb data-exclusionary or data-exploitative practices will first have to meet the threshold of establishing capacity for a firm to derive market power from its ability to sustain datasets unavailable to its competitors. In this context the peculiar ways in which network effects, multi-homing practices and how dynamic the digital markets are, are all relevant factors which could have both positive and negative impacts on competition. There is a need for greater discussion on data as a sources of market power in both digital and non-digital markets, and how this legal position can used to curb data monopolies, especially in light of government backed monopolies for identity verification and payments in India.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Consumer Protection Law&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;The Consumer Protection Bill, 2015, tabled in the Parliament towards the end of the monsoon session has introduced an expansive definition of the term “unfair trade practices.” The definition as per the Bill includes the disclosure “to any other person any personal information given in confidence by the consumer.” This clause excludes from the scope of unfair trade practices, disclosures under provisions of any law in force or in public interest. This provision could have significant impact on the personal data protection law in India. Alongside, there is also a need to ensure that principles such as safeguarding consumers personal information in order to ensure that the same is not used to their detriment are included within the definition of unfair trade practices. This would provide consumers an efficient and relatively speedy forum to contest adverse impacts on them of data driven decision-making.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Sectoral Regulation &lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;Our research into sectoral case studies revealed that there are a number of existing sectoral laws and policies that are applicable to aspects of AI. For example, in the health sector there is the Medical Council Professional Conduct, Etiquette, and Ethics Regulations 2002, the Electronic Health Records Standards 2016, the draft Medical Devices Rules 2017, the draft Digital Information Security in Healthcare Act.  In the finance sector there is the Credit Information Companies (Regulation) Act 2005 and 2006, the Securities and Exchange Board of India (Investment Advisers) Regulations, 2013, the Payment and Settlement Systems Act, 2007, the Banking Regulations Act 1949, SEBI guidelines on robo advisors etc. Before new regulations, guidelines etc are developed - a comprehensive exercise needs to be undertaken at a sectoral level to understand if 1. sectoral policy adequately addresses the changes being brought about by AI 2. If it does not - is an amendment possible and if not - what form of policy would fill the gap.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;&lt;b&gt;Principled approach&lt;/b&gt;&lt;/h3&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Transparency&lt;/b&gt;&lt;/h4&gt;
&lt;h5 style="text-align: justify; "&gt;&lt;b&gt;Audits&lt;/b&gt;&lt;/h5&gt;
&lt;p style="text-align: justify; "&gt;Internal and external audits can be mechanisms towards creating transparency about the processes and results of AI solutions as they are implemented in a specific context. Audits can take place while a solution is still in ‘pilot’ mode and on a regular basis during implementation. For example,  in the Payment Card Industry (PCI) tool,  transparency is achieved through frequent audits, the results of which are simultaneously and instantly transmitted to the regulator and the developer. Ideally parts of the results of the audit are also made available to the public, even if the entire results are not shared.&lt;/p&gt;
&lt;h5 style="text-align: justify; "&gt;&lt;b&gt;Tiered Levels of Transparency&lt;/b&gt;&lt;/h5&gt;
&lt;p style="text-align: justify; "&gt;There are different levels and forms of transparency as well as different ways of achieving the same. The type and form of transparency can be tiered and dependent on factors such as criticality of function, potential direct and indirect harm, sensitivity of data involved, actor using the solution . The audience can also be tiered and could range from an individual user to senior level positions, to oversight bodies.&lt;/p&gt;
&lt;h5 style="text-align: justify; "&gt;&lt;b&gt;Human Facing Transparency&lt;/b&gt;&lt;/h5&gt;
&lt;p style="text-align: justify; "&gt;It will be important for India to define standards around human-machine interaction including the level of transparency that will be required. Will chatbots need to disclose that they are chatbots? Will a notice need to be posted that facial recognition technology is used in a CCTV camera? Will a company need to disclose in terms of service and privacy policies that data is processed via an AI driven solution? Will there be a distinction if the AI takes the decision autonomously vs. if the AI played an augmenting role? Presently, the Niti Aayog paper has been silent on this question.&lt;/p&gt;
&lt;h5 style="text-align: justify; "&gt;&lt;b&gt;Explainability&lt;/b&gt;&lt;/h5&gt;
&lt;p style="text-align: justify; "&gt;An explanation is not equivalent to complete  transparency. The obligation of providing an explanation does not mean  that the developer should necessarily  know the flow of bits through the AI system. Instead, the legal requirement of providing an explanation requires an ability to explain how certain parameters may be utilised to arrive at an outcome in a certain situation.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Doshi-Velez and Kortz have highlighted two technical ideas that may enhance a developer's ability to explain the functioning of AI systems:&lt;a href="#_ftn59" name="_ftnref59"&gt;&lt;sup&gt;&lt;sup&gt;[59]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;1) Differentiation and processing: AI systems are designed to have the inputs differentiated and processed through various forms of computation-in a reproducible and robust manner. Therefore, developers should be able to explain a particular decision by examining the inputs in an attempt to determine which of them have the greatest impact on the outcome.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;2) Counterfactual faithfulness: The second property of counterfactual faithfulness enables the developer to consider which factors caused a difference in the outcomes. Both these solutions can be deployed without necessarily knowing the contents of black boxes. As per Pasquale, ‘Explainability matters because the process of reason-giving is intrinsic to juridical determinations – not simply one modular characteristic jettisoned as anachronistic once automated prediction is sufficiently advanced.”&lt;a href="#_ftn60" name="_ftnref60"&gt;&lt;sup&gt;&lt;sup&gt;[60]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;h5 style="text-align: justify; "&gt;&lt;b&gt;Rules based system applied contextually&lt;/b&gt;&lt;/h5&gt;
&lt;p style="text-align: justify; "&gt;Oswald et al have suggested two proposals that might  mitigate algorithmic opacity.by designing a broad rules-based system, whose implementation need to be applied in a context-specific manner which thoroughly evaluates the key enablers and challengers in each specific use case.&lt;a href="#_ftn61" name="_ftnref61"&gt;&lt;sup&gt;&lt;sup&gt;[61]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;ul style="text-align: justify; "&gt;
&lt;li&gt;Experimental proportionality was designed to enable the courts to make proportionality determinations of an algorithm at the experimental stage even before the impacts are fully realised in a manner that would enable them to ensure that appropriate metrics for performance evaluation and cohesive principles of design have been adopted. In such cases they recommend that the courts give the benefit of the doubt to the public sector body subject to another hearing within a stipulated period of time once data on the impacts of the algorithm become more readily available.&lt;/li&gt;
&lt;li&gt;‘ALGO-CARE' calls for the design of a rules-based system which ensures that the algorithms&lt;a href="#_ftn62" name="_ftnref62"&gt;&lt;sup&gt;&lt;sup&gt;[62]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; are:&lt;/li&gt;
&lt;/ul&gt;
&lt;p style="text-align: justify; "&gt;(1) Advisory: Algorithms must retain an advisory capacity that augments existing human capability rather than replacing human discretion outright;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;(2) Lawful: Algorithm's proposed function, application, individual effect and use of datasets should be considered in  symbiosis with necessity, proportionality and data minimisation principles;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;(3) Granularity: Issues such as data analysis issues such as meaning of data, challenges stemming from disparate tracts of data, omitted data and inferences  should be key points in the implementation process;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;(4) Ownership: Due regard should be given to intellectual property ownership but in the case of algorithms used for governance, it may be better to have open source algorithms at the default.  Regardless of the sector,the developer must ensure that the algorithm works in a manner that enables a third party to investigate the workings of the algorithm in an adversarial judicial context.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;(5)Challengeable:The results of algorithmic analysis should be applied with regard to professional codes and regulations and be challengeable. In a report evaluating the NITI AAYOG  Discussion Paper, CIS has argued that AI that is used for governance , must be made auditable in the public domain,if not under Free and Open Source Software (FOSS)-particularly in the case of AI that has implications for fundamental rights.&lt;a href="#_ftn63" name="_ftnref63"&gt;&lt;sup&gt;&lt;sup&gt;[63]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;(6) Accuracy: The design of the algorithm should check for accuracy;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;(7) Responsible: Should consider a wider set of ethical and moral principles and the foundations of human rights as a guarantor of human dignity at all levels and&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;(8) Explainable: Machine Learning should be interpretable and accountable.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;A rules based system like ALGO-CARE can enable predictability in use frameworks for AI. Predictability compliments and strengthens  transparency.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Accountability&lt;/b&gt;&lt;/h4&gt;
&lt;h5 style="text-align: justify; "&gt;&lt;b&gt;Conduct Impact Assessment&lt;/b&gt;&lt;/h5&gt;
&lt;p style="text-align: justify; "&gt;There is a need to evolve Algorithmic Impact Assessment frameworks for the different sectors in India, which should address issues of bias, unfairness and other harmful impacts of use of automated decision making. AI is a nascent field and the impact of the technology on the economy, society, etc. is still yet to be fully understood. Impact assessment standards will be important in identifying and addressing potential or existing harms and could potentially be more important in sectors or uses where there is direct human interaction with AI or power dimensions - such as in healthcare or use by the government. A 2018 Report by the AI Now Institute lists methods that should be adopted by the government for conducting his holistic assessment&lt;a href="#_ftn64" name="_ftnref64"&gt;&lt;sup&gt;&lt;sup&gt;[64]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;: These should  include: (1) Self-assessment by the government department in charge of implementing the technology, (2)Development of meaningful inter-disciplinary external researcher review mechanisms, (3) Notice to the public regarding  self-assessment and external review, (4)Soliciting of public comments for clarification or concerns, (5) Special regard to vulnerable communities who may not be able to exercise their voice in public proceedings. An adequate review mechanism which holistically evaluates the impact of AI would ideally include all five of these components in conjunction with each other.&lt;/p&gt;
&lt;h5 style="text-align: justify; "&gt;&lt;b&gt;Regulation of Algorithms&lt;/b&gt;&lt;/h5&gt;
&lt;p style="text-align: justify; "&gt;Experts have voiced concerns about AI mimicking human prejudices due to the biases present in the Machine Learning algorithms. Scientists have revealed through their research that machine learning algorithms can imbibe gender and racial prejudices which are ingrained in language patterns or data collection processes. Since AI and machine algorithms are data driven, they arrive at results and solutions based on available &lt;br /&gt; and historical data. When this data itself is biased, the solutions presented by the AI will also be biased. While this is inherently discriminatory, scientists have provided solutions to rectify these biases which can occur at various stages by introducing a counter bias at another stage. It has also been suggested that data samples should be shaped in such a manner so as to minimise the chances of algorithmic bias. Ideally regulation of algorithms could be tailored - explainability, traceability, scrutability. We recommend that the national strategy on AI policy must take these factors into account and combination of a central agency driving the agenda, and sectoral actors framing regulations around specific uses of AI that are problematic and implementation is required.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;As the government begins to adopt AI into governance - the extent to which and the  circumstances autonomous decision making capabilities can be delegated to AI need to be questioned. Questions on whether AI should be autonomous, should always have a human in the loop, and should have a ‘kill-switch’ when used in such contexts also need to be answered. A framework or high level principles can help to guide these determinations. For example:&lt;/p&gt;
&lt;ul style="text-align: justify; "&gt;
&lt;li&gt;Modeling Human Behaviour: An AI solution trying to model human behaviour, as in the case of judicial decision-making or predictive policing, may need to be more regulated, adhere to stricter standards, and need more oversight than an algorithm that is trying to predict ‘natural’ phenomena such as traffic congestion or weather patterns.&lt;/li&gt;
&lt;li&gt;Human Impact: An AI solution which could cause greater harm if applied erroneously, such as a robot soldier that mistakenly targets a civilian, requires a different level and framework of regulation than an AI solution designed to create a learning path for a student in the education sector that errs in making an appropriate assessment.&lt;/li&gt;
&lt;li&gt;Primary User: AI solutions whose primary users are state agents attempting to discharge duties in the public interest, such as police officers, should be approached with more caution than those used by private individuals, such as farmers getting weather alerts.&lt;/li&gt;
&lt;/ul&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Fairness&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;It is possible to incorporate broad definitions of fairness into a wide range of data analysis and classification systems.&lt;a href="#_ftn65" name="_ftnref65"&gt;&lt;sup&gt;&lt;sup&gt;[65]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; While there can be no bright-line rules that will necessarily enable the operator or designer of a Machine Learning System to arrive at an ex ante determination of fairness, from a public policy perspective, there must be a set of rules or best practices that explain how notions of fairness should be utilised in the real world applications of AI-driven solutions.&lt;a href="#_ftn66" name="_ftnref66"&gt;&lt;sup&gt;&lt;sup&gt;[66]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; While broad parameters should be encoded by the developer to ensure compliance with constitutional standards, it is also crucial that the functioning of the algorithm allows for an ex-post determination of fairness by an independent oversight body if the impact of the AI driven solution is challenged.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Further, while there is no precedent on this anywhere in the world, India could consider establishing a Committee entrusted with the specific task of continuously evaluating the operation of AI-driven algorithms. Questions that the government would need to answer with regard to this body include:&lt;/p&gt;
&lt;ul style="text-align: justify; "&gt;
&lt;li&gt;What should the composition of the body be?&lt;/li&gt;
&lt;li&gt;What should be the procedural mechanisms that govern the operation of the body?&lt;/li&gt;
&lt;li&gt;When should the review committee step in? This is crucial because excessive review may re-entrench the bureaucracy that the AI driven solution was looking to eliminate.&lt;/li&gt;
&lt;li&gt;What information will be necessary for the review committee to carry out its determination? Will there be conflicts with IP, and if so how will these be resolved?&lt;/li&gt;
&lt;li&gt;To what degree will the findings of the committee be made public?&lt;/li&gt;
&lt;li&gt;What powers will the committee have? Beyond making determinations, how will these be enforced?&lt;/li&gt;
&lt;/ul&gt;
&lt;h3 style="text-align: justify; "&gt;&lt;b&gt;Market incentives&lt;/b&gt;&lt;/h3&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Standards as a means to address data issues&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;With digitisation of legacy records and the ability to capture more granular data digitally, one of the biggest challenges facing Big Data is a lack of standardised data and interoperability frameworks. This is particularly true in the healthcare and medicine sector where medical records do not follow a clear standard, which poses a challenge to their datafication and analysis. The presence of developed standards in data management and exchange,  interoperable Distributed Application Platform and Services, Semantic related standards for markup, structure, query, semantics, Information access and exchange have been spoken of as essential to address the issues of lack of standards in Big Data.&lt;a href="#_ftn67" name="_ftnref67"&gt;&lt;sup&gt;&lt;sup&gt;[67]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Towards enabling usability of data, it is important that clear data standards are established. This has been recognized by Niti Aayog in its National Strategy for AI. On one hand, there can operational issues with allowing each organisation to choose their own specific standards to operate under, while on the other hand, non-uniform digitisation of data will also cause several practical problems, most primarily to do with interoperability of the individual services, as well as their usability. For instance, in the healthcare sector, though India has adopted an EHR policy, implementation of this policy is not yet harmonized - leading to different interpretations of ‘digitizing records (i.e taking snapshots of doctor notes), retention methods and periods, and comprehensive implementation across all hospital data. Similarly, while independent banks and other financial organisations are already following, or in the process of developing internal practices,there exist no uniform standards for digitisation of financial data. As AI development, and application becomes more mainstream in the financial sector, the lack of a fixed standard could create significant problems.&lt;/p&gt;
&lt;h4 style="text-align: justify; "&gt;&lt;b&gt;Better Design Principles in Data Collection&lt;/b&gt;&lt;/h4&gt;
&lt;p style="text-align: justify; "&gt;An enduring criticism of the existing notice and consent framework has been that long, verbose and unintelligible privacy notices are not efficient in informing individuals and helping them make rational choices. While this problem predates Big Data, it has only become more pronounced in recent times, given the ubiquity of data collection and implicit ways in which data is being collected and harvested. Further, constrained interfaces on mobile devices, wearables, and smart home devices connected in an Internet of Things amplify the usability issues of the privacy notices. Some of the issues with privacy notices include Notice complexity, lack of real choices, notices decoupled from the system collecting data etc. An industry standard for a design approach to privacy notices which includes looking at factors such as the timing of the notice, the channels used for communicating the notices, the modality (written, audio, machine readable, visual) of the notice and whether the notice only provides information or also include choices within its framework, would be of great help.  Further, use of privacy by design principles can be done not just at the level of privacy notices but at each step of the information flow, and the architecture of the system can be geared towards more privacy enhanced choices.&lt;/p&gt;
&lt;hr /&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref1" name="_ftn1"&gt;&lt;sup&gt;&lt;sup&gt;[1]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://cis-india.org/internet-governance/blog/artificial-intelligence-in-india-a-compendium&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref2" name="_ftn2"&gt;&lt;sup&gt;&lt;sup&gt;[2]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;a href="https://obamawhitehouse.archives.gov/sites/default/files/whitehouse_files/microsites/ostp/NSTC/preparing_for_the_future_of_ai.pdf"&gt;https://obamawhitehouse.archives.gov/sites/default/files/whitehouse_files/microsites/ostp/NSTC/preparing_for_the_future_of_ai.pdf&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref3" name="_ftn3"&gt;&lt;sup&gt;&lt;sup&gt;[3]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;a href="https://www.nitrd.gov/PUBS/national_ai_rd_strategic_plan.pdf"&gt;https://www.nitrd.gov/PUBS/national_ai_rd_strategic_plan.pdf&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref4" name="_ftn4"&gt;&lt;sup&gt;&lt;sup&gt;[4]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.gov.uk/government/publications/artificial-intelligence-sector-deal/ai-sector-deal&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref5" name="_ftn5"&gt;&lt;sup&gt;&lt;sup&gt;[5]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;a href="http://www.nedo.go.jp/content/100865202.pdf"&gt;http://www.nedo.go.jp/content/100865202.pdf&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref6" name="_ftn6"&gt;&lt;sup&gt;&lt;sup&gt;[6]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.eu-robotics.net/sparc/10-success-stories/european-robotics-creating-new-markets.html?changelang=2&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref7" name="_ftn7"&gt;&lt;sup&gt;&lt;sup&gt;[7]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;a href="https://www.cifar.ca/ai/pan-canadian-artificial-intelligence-strategy"&gt;https://www.cifar.ca/ai/pan-canadian-artificial-intelligence-strategy&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref8" name="_ftn8"&gt;&lt;sup&gt;&lt;sup&gt;[8]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;a href="https://www.newamerica.org/cybersecurity-initiative/blog/chinas-plan-lead-ai-purpose-prospects-and-problems/"&gt;https://www.newamerica.org/cybersecurity-initiative/blog/chinas-plan-lead-ai-purpose-prospects-and-problems/&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref9" name="_ftn9"&gt;&lt;sup&gt;&lt;sup&gt;[9]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;a href="http://www.uaeai.ae/en/"&gt;http://www.uaeai.ae/en/&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref10" name="_ftn10"&gt;&lt;sup&gt;&lt;sup&gt;[10]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;a href="https://www.aisingapore.org/"&gt;https://www.aisingapore.org/&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref11" name="_ftn11"&gt;&lt;sup&gt;&lt;sup&gt;[11]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;a href="https://news.joins.com/article/22625271"&gt;https://news.joins.com/article/22625271&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref12" name="_ftn12"&gt;&lt;sup&gt;&lt;sup&gt;[12]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;a href="https://www.aiforhumanity.fr/pdfs/MissionVillani_Report_ENG-VF.pdf"&gt;https://www.aiforhumanity.fr/pdfs/MissionVillani_Report_ENG-VF.pdf&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref13" name="_ftn13"&gt;&lt;sup&gt;&lt;sup&gt;[13]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;a href="https://ec.europa.eu/digital-single-market/en/news/communication-artificial-intelligence-europe"&gt;https://ec.europa.eu/digital-single-market/en/news/communication-artificial-intelligence-europe&lt;/a&gt; &lt;a href="https://www.euractiv.com/section/digital/news/twenty-four-eu-countries-sign-artificial-intelligence-pact-in-bid-to-compete-with-us-china/"&gt;https://www.euractiv.com/section/digital/news/twenty-four-eu-countries-sign-artificial-intelligence-pact-in-bid-to-compete-with-us-china/&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref14" name="_ftn14"&gt;&lt;sup&gt;&lt;sup&gt;[14]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.aitf.org.in/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref15" name="_ftn15"&gt;&lt;sup&gt;&lt;sup&gt;[15]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; http://www.niti.gov.in/writereaddata/files/document_publication/NationalStrategy-for-AI-Discussion-Paper.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref16" name="_ftn16"&gt;&lt;sup&gt;&lt;sup&gt;[16]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://obamawhitehouse.archives.gov/sites/default/files/whitehouse_files/microsites/ostp/NSTC/preparing_for_the_future_of_ai.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref17" name="_ftn17"&gt;&lt;sup&gt;&lt;sup&gt;[17]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.cifar.ca/ai/pan-canadian-artificial-intelligence-strategy&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref18" name="_ftn18"&gt;&lt;sup&gt;&lt;sup&gt;[18]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://cis-india.org/internet-governance/blog/the-ai-task-force-report-the-first-steps-towards-indias-ai-framework&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref19" name="_ftn19"&gt;&lt;sup&gt;&lt;sup&gt;[19]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://cis-india.org/internet-governance/blog/niti-aayog-discussion-paper-an-aspirational-step-towards-india2019s-ai-policy&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref20" name="_ftn20"&gt;&lt;sup&gt;&lt;sup&gt;[20]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;a href="https://ec.europa.eu/digital-single-market/en/news/communication-artificial-intelligence-europe"&gt;https://ec.europa.eu/digital-single-market/en/news/communication-artificial-intelligence-europe&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref21" name="_ftn21"&gt;&lt;sup&gt;&lt;sup&gt;[21]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; http://pib.nic.in/newsite/PrintRelease.aspx?relid=181007&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref22" name="_ftn22"&gt;&lt;sup&gt;&lt;sup&gt;[22]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Ryan Calo (2017). Artificial Intelligence Policy: A Primer and Roadmap. U.C. Davis L. Review, Vol. 51, pp. 398 - 435.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref23" name="_ftn23"&gt;&lt;sup&gt;&lt;sup&gt;[23]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://trai.gov.in/sites/default/files/CIS_07_11_2017.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref24" name="_ftn24"&gt;&lt;sup&gt;&lt;sup&gt;[24]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.datatilsynet.no/globalassets/global/english/ai-and-privacy.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref25" name="_ftn25"&gt;&lt;sup&gt;&lt;sup&gt;[25]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; http://www.niti.gov.in/writereaddata/files/document_publication/NationalStrategy-for-AI-Discussion-Paper.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref26" name="_ftn26"&gt;&lt;sup&gt;&lt;sup&gt;[26]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://martechtoday.com/bottos-launches-a-marketplace-for-data-to-train-ai-models-214265&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref27" name="_ftn27"&gt;&lt;sup&gt;&lt;sup&gt;[27]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://opensource.com/article/18/5/top-8-open-source-ai-technologies-machine-learning&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref28" name="_ftn28"&gt;&lt;sup&gt;&lt;sup&gt;[28]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Amanda Levendowski, How Copyright Law Can Fix Artificial Intelligence’s Implicit Bias Problem, 93 WASH. L. REV. (forthcoming 2018) (manuscript at 23, 27-32), &lt;a href="https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3024938"&gt;https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3024938&lt;/a&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref29" name="_ftn29"&gt;&lt;sup&gt;&lt;sup&gt;[29]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;i&gt;Id&lt;/i&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref30" name="_ftn30"&gt;&lt;sup&gt;&lt;sup&gt;[30]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; H. Brendan McMahan, et al., Communication-Efficient Learning of Deep Networks from Decentralized Data, arXiv:1602.05629 (Feb. 17, 2016), &lt;a href="https://arxiv.org/abs/1602.05629"&gt;https://arxiv.org/abs/1602.05629&lt;/a&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref31" name="_ftn31"&gt;&lt;sup&gt;&lt;sup&gt;[31]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;i&gt;Id&lt;/i&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref32" name="_ftn32"&gt;&lt;sup&gt;&lt;sup&gt;[32]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Pierre N. Leval, Nimmer Lecture: Fair Use Rescued, 44 UCLA L. REV. 1449, 1457 (1997).&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref33" name="_ftn33"&gt;&lt;sup&gt;&lt;sup&gt;[33]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://cis-india.org/internet-governance/blog/niti-aayog-discussion-paper-an-aspirational-step-towards-india2019s-ai-policy&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref34" name="_ftn34"&gt;&lt;sup&gt;&lt;sup&gt;[34]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://cis-india.org/internet-governance/blog/niti-aayog-discussion-paper-an-aspirational-step-towards-india2019s-ai-policy&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref35" name="_ftn35"&gt;&lt;sup&gt;&lt;sup&gt;[35]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Discussion Paper on National Strategy for Artificial Intelligence | NITI Aayog | National Institution for Transforming India. (n.d.) p. 54. Retrieved from http://niti.gov.in/content/national-strategy-ai-discussion-paper.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref36" name="_ftn36"&gt;&lt;sup&gt;&lt;sup&gt;[36]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Leverhulme Centre for the Future of Intelligence, http://lcfi.ac.uk/.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref37" name="_ftn37"&gt;&lt;sup&gt;&lt;sup&gt;[37]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; AI Now, https://ainowinstitute.org/.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref38" name="_ftn38"&gt;&lt;sup&gt;&lt;sup&gt;[38]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://cis-india.org/internet-governance/ai-and-governance-case-study-pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref39" name="_ftn39"&gt;&lt;sup&gt;&lt;sup&gt;[39]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; http://iridescentlearning.org/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref40" name="_ftn40"&gt;&lt;sup&gt;&lt;sup&gt;[40]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://cis-india.org/internet-governance/ai-and-governance-case-study-pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref41" name="_ftn41"&gt;&lt;sup&gt;&lt;sup&gt;[41]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Points, L., &amp;amp; Potton, E. (2017). Artificial intelligence and automation in the UK.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref42" name="_ftn42"&gt;&lt;sup&gt;&lt;sup&gt;[42]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Paul, Y., Hickok, E., Sinha, A. and Tiwari, U., Artificial Intelligence in the Healthcare Industry in India, Centre for Internet and Society. Available at &lt;a href="https://cis-india.org/internet-governance/files/ai-and-healtchare-report"&gt;https://cis-india.org/internet-governance/files/ai-and-healtchare-report&lt;/a&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref43" name="_ftn43"&gt;&lt;sup&gt;&lt;sup&gt;[43]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Goudarzi, S., Hickok, E., and Sinha, A., AI in the Banking and Finance Industry in India,  Centre for Internet and Society. Available at &lt;a href="https://cis-india.org/internet-governance/blog/ai-in-banking-and-finance"&gt;https://cis-india.org/internet-governance/blog/ai-in-banking-and-finance&lt;/a&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref44" name="_ftn44"&gt;&lt;sup&gt;&lt;sup&gt;[44]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Paul, Y., Hickok, E., Sinha, A. and Tiwari, U., Artificial Intelligence in the Healthcare Industry in India, Centre for Internet and Society. Available at &lt;a href="https://cis-india.org/internet-governance/files/ai-and-healtchare-report"&gt;https://cis-india.org/internet-governance/files/ai-and-healtchare-report&lt;/a&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref45" name="_ftn45"&gt;&lt;sup&gt;&lt;sup&gt;[45]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://news.microsoft.com/en-in/government-karnataka-inks-mou-microsoft-use-ai-digital-agriculture/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref46" name="_ftn46"&gt;&lt;sup&gt;&lt;sup&gt;[46]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://news.microsoft.com/en-in/government-telangana-adopts-microsoft-cloud-becomes-first-state-use-artificial-intelligence-eye-care-screening-children/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref47" name="_ftn47"&gt;&lt;sup&gt;&lt;sup&gt;[47]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; NITI Aayog. (2018). Discussion Paper on National Strategy for Artificial Intelligence. Retrieved from http://niti.gov.in/content/national-strategy-ai-discussion-paper. 18&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref48" name="_ftn48"&gt;&lt;sup&gt;&lt;sup&gt;[48]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://edps.europa.eu/sites/edp/files/publication/16-10-19_marrakesh_ai_paper_en.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref49" name="_ftn49"&gt;&lt;sup&gt;&lt;sup&gt;[49]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://cis-india.org/internet-governance/blog/the-srikrishna-committee-data-protection-bill-and-artificial-intelligence-in-india&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref50" name="_ftn50"&gt;&lt;sup&gt;&lt;sup&gt;[50]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; J. Schradie, The Digital Production Gap: The Digital Divide and Web 2.0 Collide. Elsevier Poetics, 39 (1).&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref51" name="_ftn51"&gt;&lt;sup&gt;&lt;sup&gt;[51]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; D Lazer, et al., The Parable of Google Flu: Traps in Big Data Analysis. Science. 343 (1).&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref52" name="_ftn52"&gt;&lt;sup&gt;&lt;sup&gt;[52]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Danah Boyd and Kate Crawford,  Critical Questions for Big Data. Information, Communication &amp;amp; Society. 15 (5).&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref53" name="_ftn53"&gt;&lt;sup&gt;&lt;sup&gt;[53]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; John Podesta (2014). Big Data: Seizing Opportunities, Preserving Values, available at &lt;a href="http://www.whitehouse.gov/sites/default/files/docs/big_data_privacy_report_may_1_2014.pdf"&gt;http://www.whitehouse.gov/sites/default/files/docs/big_data_privacy_report_may_1_2014.pdf&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref54" name="_ftn54"&gt;&lt;sup&gt;&lt;sup&gt;[54]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; E. Ramirez, (2014) FTC to Examine Effects of Big Data on Low Income and Underserved Consumers at September Workshop, available at &lt;a href="http://www.ftc.gov/news-events/press-releases/2014/04/ftc-examine-effects-big-data-lowincome-underserved-consumers"&gt;http://www.ftc.gov/news-events/press-releases/2014/04/ftc-examine-effects-big-data-lowincome-underserved-consumers&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref55" name="_ftn55"&gt;&lt;sup&gt;&lt;sup&gt;[55]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; M. Schrage, Big Data’s Dangerous New Era of Discrimination, available at &lt;a href="http://blogs.hbr.org/2014/01/bigdatas-dangerous-new-era-of-discrimination/"&gt;http://blogs.hbr.org/2014/01/bigdatas-dangerous-new-era-of-discrimination/&lt;/a&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref56" name="_ftn56"&gt;&lt;sup&gt;&lt;sup&gt;[56]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Google/DoubleClick Merger case&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref57" name="_ftn57"&gt;&lt;sup&gt;&lt;sup&gt;[57]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; French Competition Authority, Opinion n°10-A-13 of 14.06.2010, http://www.autoritedelaconcurrence.fr/pdf/avis/10a13.pdf. That opinion of the Authority aimed at giving general guidance on that subject. It did not focus on any particular market or industry although it described a possible application of its analysis to the telecom industry.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref58" name="_ftn58"&gt;&lt;sup&gt;&lt;sup&gt;[58]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;a href="http://www.analysisgroup.com/is-big-data-a-true-source-of-market-power/#sthash.5ZHmrD1m.dpuf"&gt;http://www.analysisgroup.com/is-big-data-a-true-source-of-market-power/#sthash.5ZHmrD1m.dpuf&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref59" name="_ftn59"&gt;&lt;sup&gt;&lt;sup&gt;[59]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Doshi-Velez, F., Kortz, M., Budish, R., Bavitz, C., Gershman, S., O'Brien, D., ... &amp;amp; Wood, A. (2017). Accountability of AI under the law: The role of explanation. arXiv preprint arXiv:1711.01134.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref60" name="_ftn60"&gt;&lt;sup&gt;&lt;sup&gt;[60]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Frank A. Pasquale ‘Toward a Fourth Law of Robotics: Preserving Attribution, Responsibility, and Explainability in an Algorithmic Society’ (July 14, 2017). Ohio State Law Journal, Vol. 78, 2017; U of Maryland Legal Studies Research Paper No. 2017-21, 7.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref61" name="_ftn61"&gt;&lt;sup&gt;&lt;sup&gt;[61]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Oswald, M., Grace, J., Urwin, S., &amp;amp; Barnes, G. C. (2018). Algorithmic risk assessment policing models: lessons from the Durham HART model and ‘Experimental’ proportionality. Information &amp;amp; Communications Technology Law, 27(2), 223-250.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref62" name="_ftn62"&gt;&lt;sup&gt;&lt;sup&gt;[62]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Ibid.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref63" name="_ftn63"&gt;&lt;sup&gt;&lt;sup&gt;[63]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Abraham S., Hickok E., Sinha A., Barooah S., Mohandas S., Bidare P. M., Dasgupta S., Ramachandran V., and Kumar S., NITI Aayog Discussion Paper: An aspirational step towards India’s AI policy. Retrieved from https://cis-india.org/internet-governance/files/niti-aayog-discussion-paper.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref64" name="_ftn64"&gt;&lt;sup&gt;&lt;sup&gt;[64]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Reisman D., Schultz J., Crawford K., Whittaker M., (2018, April) Algorithmic Impact Assessments: A Practical Framework For Public Agency Accountability. Retrieved from https://ainowinstitute.org/aiareport2018.pdf.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref65" name="_ftn65"&gt;&lt;sup&gt;&lt;sup&gt;[65]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Sample I., (2017, November 5) Computer says no: why making AIs fair, accountable and transparent is crucial. Retrieved from &lt;a href="https://www.theguardian.com/science/2017/nov/05/computer-says-no-why-making-ais-fair-accountable-and-transparent-is-crucial"&gt;https://www.theguardian.com/science/2017/nov/05/computer-says-no-why-making-ais-fair-accountable-and-transparent-is-crucial&lt;/a&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref66" name="_ftn66"&gt;&lt;sup&gt;&lt;sup&gt;[66]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Kroll, J. A., Barocas, S., Felten, E. W., Reidenberg, J. R., Robinson, D. G., &amp;amp; Yu, H. (2016). Accountable algorithms. U. Pa. L. Rev., 165, 633.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref67" name="_ftn67"&gt;&lt;sup&gt;&lt;sup&gt;[67]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;a href="http://www.iso.org/iso/big_data_report-jtc1.pdf"&gt;http://www.iso.org/iso/big_data_report-jtc1.pdf&lt;/a&gt;&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/blog/ai-in-india-a-policy-agenda'&gt;https://cis-india.org/internet-governance/blog/ai-in-india-a-policy-agenda&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Amber Sinha, Elonnai Hickok and Arindrajit Basu</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-09-05T15:39:59Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/blog/the-srikrishna-committee-data-protection-bill-and-artificial-intelligence-in-india">
    <title>The Srikrishna Committee Data Protection Bill and Artificial Intelligence in India</title>
    <link>https://cis-india.org/internet-governance/blog/the-srikrishna-committee-data-protection-bill-and-artificial-intelligence-in-india</link>
    <description>
        &lt;b&gt;Artificial Intelligence in many ways is in direct conflict with traditional data protection principles and requirements including consent, purpose limitation, data minimization, retention and deletion, accountability, and transparency.&lt;/b&gt;
        &lt;h3 style="text-align: justify; "&gt;Privacy Considerations in AI&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;Other related privacy concerns in the context of AI center around re-identification and de-anonymisation, discrimination, unfairness, inaccuracies, bias, opacity, profiling, misuse of data, and embedded power dynamics.&lt;a href="#_ftn1" name="_ftnref1"&gt;&lt;sup&gt;[1]&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The need for large amounts of data to improve accuracy, the ability to process vast amounts of granular data, and the present tension between the explainability and the results of AI systems&lt;a href="#_ftn2" name="_ftnref2"&gt;&lt;sup&gt;&lt;sup&gt;[2]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; have raised many concerns on both sides of the fence. On one hand, there is concern that heavy-handed or inappropriate regulation will stifle innovation: if developers can only use data for pre-defined purposes, the prospects of AI are limited. On the other hand, individuals are concerned that privacy will be significantly undermined by AI systems that collect and process data in real time and at a personal level not previously possible. Chatbots, home assistants, wearable devices, robot caregivers, facial recognition technology and the like have the ability to collect data from a person at an intimate level. At the same time, some have argued that AI can work towards protecting privacy by limiting the access that humans working at the respective companies have to personal data.&lt;a href="#_ftn3" name="_ftnref3"&gt;&lt;sup&gt;&lt;sup&gt;[3]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;India is embracing AI. Two national roadmaps for AI were released in 2018, by the Ministry of Commerce and Industry and by Niti Aayog respectively. Both roadmaps emphasized the importance of addressing privacy concerns in the context of AI and of ensuring that robust privacy legislation is enacted. In August 2018, the Srikrishna Committee released a draft Personal Data Protection Bill 2018 and an associated report that outlines and justifies a framework for privacy in India. As the development and use of AI in India continues to grow, it is important that India simultaneously moves forward with a privacy framework that addresses the privacy dimensions of AI.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;In this article we attempt to analyse if and how the Srikrishna Committee draft Bill and report address AI, contrast this with developments in the EU and the passing of the GDPR, and identify solutions that are being explored to develop AI while upholding and safeguarding privacy.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;The GDPR and Artificial Intelligence&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;The General Data Protection Regulation became enforceable in May 2018 and establishes a framework for the processing of personal data of individuals within the European Union. The GDPR has been described by the IAPP as taking a ‘risk based’ approach to data protection that pushes data controllers to engage in risk analysis and adopt ‘risk measured responses’.&lt;a href="#_ftn4" name="_ftnref4"&gt;&lt;sup&gt;&lt;sup&gt;[4]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Though the GDPR does not explicitly address artificial intelligence, it has a number of provisions that address automated decision making and profiling, and a number of provisions that will impact companies using artificial intelligence in their business activities. These are outlined below:&lt;/p&gt;
&lt;ol style="text-align: justify; "&gt;
&lt;li&gt;&lt;b&gt;Data rights: &lt;/b&gt;The GDPR provides individuals with a number of data rights: the right to be informed, right of access, right to rectification, right to erasure, right to restrict processing, right to data portability, right to object, and rights related to automated decision making including profiling. The last of these seeks to address concerns arising out of automated decision making by giving the individual the right not to be subject to a decision based solely on automated processing, including profiling, if the decision would produce legal effects concerning them or similarly significantly affect them. There are three exceptions to this right: if the automated decision making is (a) necessary for the performance of a contract, (b) authorised by Union or Member State law, or (c) based on explicit consent.&lt;a href="#_ftn5" name="_ftnref5"&gt;&lt;sup&gt;&lt;sup&gt;[5]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;/li&gt;
&lt;li&gt;&lt;b&gt;Transparency:&lt;/b&gt; Under Article 14, data controllers must support the right to opt out of automated decision making by notifying individuals of the existence of automated decision making, including profiling, and by providing meaningful information about the logic involved as well as the potential consequences of such processing.&lt;a href="#_ftn6" name="_ftnref6"&gt;&lt;sup&gt;&lt;sup&gt;[6]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Importantly, this requirement has the potential of ensuring that companies do not operate completely ‘black box’ algorithms within their business processes.&lt;/li&gt;
&lt;li&gt;&lt;b&gt;Fairness: &lt;/b&gt;The principle of fairness found under Article 5(1) will also apply to the processing of personal data by AI. The principle requires that personal data be processed lawfully, fairly, and in a transparent manner in relation to the data subject. Recital 71 further clarifies that this includes implementing appropriate mathematical and statistical measures for profiling, ensuring that inaccuracies are corrected, and ensuring that processing does not result in discriminatory effects.&lt;a href="#_ftn7" name="_ftnref7"&gt;&lt;sup&gt;&lt;sup&gt;[7]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;/li&gt;
&lt;li&gt;&lt;b&gt;Purpose Limitation:&lt;/b&gt; The principle of purpose limitation (Article 5(1)(b)) requires that personal data be collected for specified, explicit, and legitimate purposes and not be further processed in a manner incompatible with those purposes. Processing for archiving purposes in the public interest, scientific or historical research purposes or statistical purposes is not considered incompatible with the initial purposes. It has been noted that it is unclear if research carried out through artificial intelligence would fall under this exception, as the GDPR does not define ‘scientific purposes’.&lt;a href="#_ftn8" name="_ftnref8"&gt;&lt;sup&gt;&lt;sup&gt;[8]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;/li&gt;
&lt;li&gt;&lt;b&gt;Privacy by Design and Default:&lt;/b&gt; Article 25 requires all data controllers to implement technical and organizational measures to meet the requirements of the regulation. This could include techniques like pseudonymisation. Data controllers also are required to implement appropriate technical and organizational measures for ensuring that by default only personal data which are necessary for a specific purpose are processed.&lt;a href="#_ftn9" name="_ftnref9"&gt;&lt;sup&gt;&lt;sup&gt;[9]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;b&gt;Data Protection Impact Assessments:&lt;/b&gt; Article 35 requires data controllers to undertake impact assessments if they are undertaking processing that is likely to result in a high risk to individuals. This includes systematic and extensive profiling, processing special categories of data or criminal offence data on a large scale, or systematic monitoring of publicly accessible places on a large scale. In implementation, some jurisdictions like the UK require impact assessments in additional circumstances, including where the data controller: uses new technologies; uses profiling or special category data to decide on access to services; profiles individuals on a large scale; processes biometric or genetic data; matches data or combines datasets from different sources; collects personal data from a source other than the individual without providing them with a privacy notice; tracks individuals’ location or behaviour; profiles children or targets marketing or online services at them; or processes data that might endanger the individual’s physical health or safety in the event of a security breach.&lt;a href="#_ftn10" name="_ftnref10"&gt;&lt;sup&gt;&lt;sup&gt;[10]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;b&gt;Security:&lt;/b&gt; Article 32 requires data controllers to ensure a level of security appropriate to the risk, including employing methods like encryption and pseudonymization. &lt;/li&gt;
&lt;/ol&gt;
&lt;h3 style="text-align: justify; "&gt;Srikrishna Committee Bill and AI&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;The Draft Data Protection Bill and associated report by the Srikrishna Committee was published in August 2018 and recommends a privacy framework for India. The Bill contains a number of provisions that will directly impact data fiduciaries using AI and that try to account for the unintended consequences of emerging technologies like AI. These include:&lt;/p&gt;
&lt;ol style="text-align: justify; "&gt;
&lt;li&gt;&lt;b&gt;Definition of Harm:&lt;/b&gt; The Bill defines harm as including bodily or mental injury, loss, distortion or theft of identity, financial loss or loss of property, loss of reputation or humiliation, loss of employment, any discriminatory treatment, any subjection to blackmail or extortion, any denial or withdrawal of a service, benefit or good resulting from an evaluative decision about the data principal, any restriction placed or suffered directly or indirectly on speech, movement or any other action arising out of a fear of being observed or surveilled, any observation or surveillance that is not reasonably expected by the data principal. The Bill also allows for categories of significant harm to be further defined by the data protection authority.&lt;/li&gt;
&lt;/ol&gt;
&lt;p style="text-align: justify; "&gt;Many of the above are harms that have been associated with artificial intelligence - specifically loss of employment, discriminatory treatment, and denial of service. Enabling the data protection authority to further define categories of significant harm could allow unexpected harms arising from the use of AI to come under the ambit of the Bill.&lt;/p&gt;
&lt;ul style="text-align: justify; "&gt;
&lt;li&gt;&lt;b&gt;Data Rights:&lt;/b&gt; Like the GDPR, the Bill creates a set of data rights for the individual, including the right to confirmation and access, correction, data portability, and the right to be forgotten. At the same time, the Bill is intentionally silent on the rights and obligations in the GDPR that address automated decision making: the right to object to processing,&lt;a href="#_ftn11" name="_ftnref11"&gt;&lt;sup&gt;&lt;sup&gt;[11]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; the right to opt out of automated decision making,&lt;a href="#_ftn12" name="_ftnref12"&gt;&lt;sup&gt;&lt;sup&gt;[12]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; and the obligation on the data controller to inform the individual about the use of automated decision making and to provide basic information regarding its logic and impact.&lt;a href="#_ftn13" name="_ftnref13"&gt;&lt;sup&gt;&lt;sup&gt;[13]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; As justification, the Committee noted the following in its report: the right to restrict processing may be unnecessary in India, as it provides only interim remedies around issues such as inaccuracy of data, and the same result can be achieved by a data principal approaching the DPA or the courts for a stay on processing, or by simply withdrawing consent. The objective of protecting against discrimination, bias, and opaque decisions, which the right to object to automated processing and to receive information about the processing of data seeks to fulfill, would in the Indian context be better achieved through an accountability framework requiring specific data fiduciaries that make evaluative decisions through automated means to set up processes that ‘weed out’ discrimination. At the same time, if discrimination has taken place, individuals can seek remedy through the courts.&lt;/li&gt;
&lt;/ul&gt;
&lt;p style="text-align: justify; "&gt;By taking this approach, the Bill creates a framework to address harms arising out of AI, but does not empower the individual to decide how their data is processed and remains silent on the issue of ‘black box’ algorithms.&lt;/p&gt;
&lt;ul style="text-align: justify; "&gt;
&lt;li&gt;&lt;b&gt;Data Quality&lt;/b&gt;: Requires data fiduciaries to ensure that personal data that is processed is complete, accurate, not misleading and updated with respect to the purposes for which it is processed. When taking steps to comply with this, data fiduciaries must take into consideration whether the personal data is likely to be used to make a decision about the data principal, whether it is likely to be disclosed to other individuals, and whether the personal data is kept in a form that distinguishes personal data based on facts from personal data based on opinions or personal assessments.&lt;a href="#_ftn14" name="_ftnref14"&gt;&lt;sup&gt;&lt;sup&gt;[14]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;p style="text-align: justify; "&gt;This principle, while not mandating that data fiduciaries take into account considerations such as biases in datasets, could potentially be interpreted by the data protection authority to include in its scope means of ensuring that data does not contain or result in bias.&lt;/p&gt;
&lt;ul style="text-align: justify; "&gt;
&lt;li&gt;&lt;b&gt;Principle of Privacy by Design:&lt;/b&gt; Requires significant data fiduciaries to have in place a number of policies and measures around several aspects of privacy. These include: (a) measures to ensure that managerial, organizational, business practices and technical systems are designed in a manner to anticipate, identify, and avoid harm to the data principal; (b) the obligations mentioned in Chapter II are embedded in organisational and business practices; (c) technology used in the processing of personal data is in accordance with commercially accepted or certified standards; (d) legitimate interests of business including any innovation are achieved without compromising privacy interests; (e) privacy is protected throughout processing, from the point of collection to the deletion of personal data; (f) processing of personal data is carried out in a transparent manner; and (g) the interest of the data principal is accounted for at every stage of processing of personal data.&lt;/li&gt;
&lt;/ul&gt;
&lt;p style="text-align: justify; "&gt;A number of these (a, d, e, and g) require that the interest of the data principal is accounted for throughout the processing of personal data. This will be significant for systems driven by artificial intelligence, as a number of the harms that have arisen from the use of AI - including discrimination, denial of service, and loss of employment - have been brought under the definition of harm within the Bill. Placing the interest of the data principal first is also important in protecting against unintended consequences or harms that may arise from AI.&lt;a href="#_ftn15" name="_ftnref15"&gt;&lt;sup&gt;&lt;sup&gt;[15]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; If enacted, it will be important to see what policies and measures emerge in the context of AI to comply with this principle. It will also be important to see what commercially accepted or certified standards companies rely on to comply with (c).&lt;/p&gt;
&lt;ul style="text-align: justify; "&gt;
&lt;li&gt;&lt;b&gt;Data Protection Impact Assessment:&lt;/b&gt; Requires data fiduciaries to undertake a data protection impact assessment when implementing new technologies, undertaking large scale profiling, or using sensitive personal data. Such assessments need to include a detailed description of the proposed processing operation, the purpose of the processing and the nature of the personal data being processed, an assessment of the potential harm that may be caused to the data principals whose personal data is proposed to be processed, and measures for managing, minimising, mitigating or removing such risk of harm. If the Authority finds that the processing is likely to cause harm to the data principals, it may direct the data fiduciary to cease the processing or to carry it out subject to conditions. This requirement applies to all significant data fiduciaries and to any other data fiduciaries as required by the DPA.&lt;a href="#_ftn16" name="_ftnref16"&gt;&lt;sup&gt;&lt;sup&gt;[16]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;p style="text-align: justify; "&gt;This principle will apply to companies implementing AI systems. For AI systems, it will be important to see how much information the DPA will require under the requirement that data fiduciaries provide detailed descriptions of the proposed processing operation and the purpose of processing.&lt;/p&gt;
&lt;ul style="text-align: justify; "&gt;
&lt;li&gt;&lt;b&gt;Classification of data fiduciaries as significant data fiduciaries&lt;/b&gt;: The Authority has the ability to notify certain categories of data fiduciaries as significant data fiduciaries based on the volume of personal data processed, the sensitivity of personal data processed, the turnover of the data fiduciary, the risk of harm resulting from any processing being undertaken by the fiduciary, the use of new technologies for processing, and any other factor relevant to causing harm to any data principal. If a data fiduciary falls under the ambit of any of these conditions, it is required to register with the Authority. All significant data fiduciaries must undertake data protection impact assessments, maintain records as per the Bill, undergo data audits, and have in place a data protection officer.&lt;/li&gt;
&lt;/ul&gt;
&lt;p style="text-align: justify; "&gt;As per this provision, companies deploying artificial intelligence would come under the definition of a significant data fiduciary and be subject to the principles of privacy by design and the other obligations articulated in the chapter. The exception to this will be if the data fiduciary comes under the definition of ‘small entity’ found in section 48.&lt;a href="#_ftn17" name="_ftnref17"&gt;&lt;sup&gt;&lt;sup&gt;[17]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;ul style="text-align: justify; "&gt;
&lt;li&gt;&lt;b&gt;Restrictions on cross border transfer of personal data: &lt;/b&gt;Requires all data fiduciaries to store a copy of personal data on a server or data centre located in India, and requires that notified categories of critical personal data be processed in servers located in India.&lt;/li&gt;
&lt;/ul&gt;
&lt;p style="text-align: justify; "&gt;It is interesting to note that, in the context of cross border sharing of data, the Bill creates a new category of data - critical personal data - that can be further defined beyond personal and sensitive personal data. For companies implementing artificial intelligence, this provision may prove cumbersome to comply with, as many utilize cloud storage and facilities located outside of India for the processing of larger amounts of data.&lt;a href="#_ftn18" name="_ftnref18"&gt;&lt;sup&gt;&lt;sup&gt;[18]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;ul style="text-align: justify; "&gt;
&lt;li&gt;&lt;b&gt;Powers and functions of the Authority&lt;/b&gt;: The Bill lays down a number of functions of the Authority one being to monitor technological developments and commercial practices that may affect protection of personal data.&lt;/li&gt;
&lt;/ul&gt;
&lt;p style="text-align: justify; "&gt;Presumably, this will include monitoring technological developments in the field of Artificial Intelligence.&lt;a href="#_ftn19" name="_ftnref19"&gt;&lt;sup&gt;&lt;sup&gt;[19]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;ul style="text-align: justify; "&gt;
&lt;li&gt;&lt;b&gt;Fair and reasonable processing: &lt;/b&gt;Requires that any person processing personal data owes a duty to the data principal to process such personal data in a fair and reasonable manner that respects the privacy of the data principal. In its report, the Srikrishna Committee explains that the principle of fair and reasonable processing is meant to address: 1. power asymmetries between data principals and data fiduciaries, recognizing that data fiduciaries have a responsibility to act in the best interest of the data principal; 2. situations where processing may be legal but not necessarily fair or in the best interest of the data principal; and 3. developing trust between the data principal and the data fiduciary.&lt;a href="#_ftn20" name="_ftnref20"&gt;&lt;sup&gt;&lt;sup&gt;[20]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;p style="text-align: justify; "&gt;This is in contrast to the GDPR which requires processing to simultaneously meet the three conditions of fairness, lawfulness, and transparency.&lt;/p&gt;
&lt;ul style="text-align: justify; "&gt;
&lt;li&gt;&lt;b&gt;Purpose Limitation: &lt;/b&gt;Personal data can only be processed for the purposes specified or any other purpose that the data principal would reasonably expect.&lt;/li&gt;
&lt;/ul&gt;
&lt;p style="text-align: justify; "&gt;As a note, the Srikrishna Committee Bill does not include ‘scientific purposes’ as an exception to the principle of purpose limitation as found in the GDPR,&lt;a href="#_ftn21" name="_ftnref21"&gt;&lt;sup&gt;&lt;sup&gt;[21]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; and instead creates an exception for research, archiving, or statistical purposes.&lt;a href="#_ftn22" name="_ftnref22"&gt;&lt;sup&gt;&lt;sup&gt;[22]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; The DPA has the responsibility of developing codes defining research purposes under the act.&lt;a href="#_ftn23" name="_ftnref23"&gt;&lt;sup&gt;&lt;sup&gt;[23]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;ol style="text-align: justify; "&gt;
&lt;li&gt;&lt;b&gt;Security Safeguards:&lt;/b&gt; Every data fiduciary must implement appropriate security safeguards including the use of methods such as de-identification and encryption, steps to protect the integrity of personal data, and steps necessary to prevent misuse, unauthorised access to, modification, and disclosure or destruction of personal data.&lt;a href="#_ftn24" name="_ftnref24"&gt;&lt;sup&gt;&lt;sup&gt;[24]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/li&gt;
&lt;/ol&gt;
&lt;p style="text-align: justify; "&gt;Unlike the GDPR, which explicitly refers to the technique of pseudonymization, the Srikrishna Bill uses the term de-identification. The Srikrishna Report clarifies that this includes techniques like pseudonymization and masking, and further clarifies that, because of the risk of re-identification, de-identified personal data should still receive the same level of protection as personal data. The Bill further gives the DPA the authority to define appropriate levels of anonymization.&lt;a href="#_ftn25" name="_ftnref25"&gt;&lt;sup&gt;&lt;sup&gt;[25]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;Technical perspectives of Privacy and AI&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;There is an emerging body of work looking at solutions to the dilemma of maintaining privacy while employing artificial intelligence, and at ways in which artificial intelligence can support and strengthen privacy. For example, there are AI driven platforms that leverage the technology to help businesses meet regulatory compliance with data protection laws,&lt;a href="#_ftn26" name="_ftnref26"&gt;&lt;sup&gt;&lt;sup&gt;[26]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; as well as research into AI privacy enhancing technologies.&lt;a href="#_ftn27" name="_ftnref27"&gt;&lt;sup&gt;&lt;sup&gt;[27]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Standards setting bodies like the IEEE have undertaken work on the ethical considerations in the collection and use of personal data when designing, developing, and/or deploying AI, through the standard ‘Ethically Aligned Design’.&lt;a href="#_ftn28" name="_ftnref28"&gt;&lt;sup&gt;&lt;sup&gt;[28]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; In the article Artificial Intelligence and Privacy, Datatilsynet - the Norwegian Data Protection Authority&lt;a href="#_ftn29" name="_ftnref29"&gt;&lt;sup&gt;&lt;sup&gt;[29]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; - breaks such methods into three categories:&lt;/p&gt;
&lt;ol style="text-align: justify; "&gt;
&lt;li&gt;Techniques for reducing the need for large amounts of training data: such techniques can include&lt;/li&gt;
&lt;ol&gt;
&lt;li&gt;&lt;b&gt;Generative adversarial networks (GANs):&lt;/b&gt; GANs are used to create synthetic data and can address the need for large volumes of labelled data without relying on real data containing personal information. GANs could potentially be useful from a research and development perspective in sectors like healthcare, where most data would qualify as sensitive personal data.&lt;/li&gt;
&lt;li&gt;&lt;b&gt;Federated Learning:&lt;/b&gt; Federated learning allows models to be trained and improved on data from a large pool of users without directly collecting user data. This is achieved by distributing a centralized model to client units, where it is improved on local data. The changes from these local improvements are shared back with the centralized server, and an average of the changes from multiple individual client units becomes the basis for improving the centralized model (a minimal illustrative sketch appears after this list).&lt;/li&gt;
&lt;li&gt;&lt;b&gt;Matrix Capsules&lt;/b&gt;: Proposed by Google researcher Geoff Hinton, Matrix Capsules improve the accuracy of existing neural networks while requiring less data.&lt;a href="#_ftn30" name="_ftnref30"&gt;&lt;sup&gt;&lt;sup&gt;[30]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/li&gt;
&lt;/ol&gt;
&lt;li&gt;Techniques that uphold data protection without reducing the basic data set&lt;/li&gt;
&lt;ol&gt;
&lt;li&gt;&lt;b&gt;Differential Privacy&lt;/b&gt;: Differential privacy intentionally adds ‘noise’ to data when it is accessed. This allows personal data to be queried without revealing identifying information about any individual (a minimal illustrative sketch appears after this list).&lt;/li&gt;
&lt;li&gt;&lt;b&gt;Homomorphic Encryption:&lt;/b&gt; Homomorphic encryption allows data to be processed while it is still encrypted. This addresses the need to access and use large amounts of personal data for multiple purposes without exposing the underlying data (a minimal illustrative sketch appears after this list).&lt;/li&gt;
&lt;li&gt;&lt;b&gt;Transfer Learning&lt;/b&gt;: Instead of building a new model, transfer learning builds upon existing models that are applied to new, related purposes or tasks. This has the potential to reduce the amount of training data needed. &lt;/li&gt;
&lt;li&gt;&lt;b&gt;RAIRD&lt;/b&gt;: Developed by Statistics Norway and the Norwegian Centre for Research Data, RAIRD is a national research infrastructure that allows for access to large amounts of statistical data for research while managing statistical confidentiality. This is achieved by allowing researchers access to metadata. The metadata is used to build analyses which are then run against detailed data without giving access to actual data.&lt;a href="#_ftn31" name="_ftnref31"&gt;&lt;sup&gt;&lt;sup&gt;[31]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/li&gt;
&lt;/ol&gt;
&lt;li&gt;Techniques to move beyond opaque algorithms&lt;/li&gt;
&lt;ol&gt;
&lt;li&gt;&lt;b&gt;Explainable AI (XAI): &lt;/b&gt;DARPA, in collaboration with Oregon State University, is researching how to create explainable models and explanation interfaces while ensuring a high level of learning performance, in order to enable individuals to interact with, trust, and manage artificial intelligence.&lt;a href="#_ftn32" name="_ftnref32"&gt;&lt;sup&gt;&lt;sup&gt;[32]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; DARPA identifies a number of entities working on different models and interfaces for analytics and autonomy AI.&lt;a href="#_ftn33" name="_ftnref33"&gt;&lt;sup&gt;&lt;sup&gt;[33]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;b&gt;Local Interpretable Model Agnostic Explanations (LIME)&lt;/b&gt;: Developed to enable trust between AI models and humans by generating explanations that highlight the key aspects that were important to the model and its decision, thus providing insight into the rationale behind a model (a minimal illustrative sketch appears after this list).&lt;a href="#_ftn34" name="_ftnref34"&gt;&lt;sup&gt;&lt;sup&gt;[34]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/li&gt;
&lt;/ol&gt; &lt;/ol&gt;
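&lt;p style="text-align: justify; "&gt;The following is a minimal, illustrative sketch in Python of the federated averaging idea described above: each client improves a shared model on its own data, and only the resulting parameters, not the raw data, are averaged by the server. The linear model, learning rate and data are hypothetical simplifications, not the method of any particular product or of the sources cited here.&lt;/p&gt;
&lt;pre&gt;
# Minimal sketch of federated averaging (illustrative assumptions: the
# "model" is a linear regression weight vector, each client performs one
# local gradient step, and the server averages the returned parameters).
import numpy as np

def local_update(weights, X, y, lr=0.1):
    # One gradient step on a client's own data; raw data never leaves the client.
    grad = X.T @ (X @ weights - y) / len(y)
    return weights - lr * grad

def federated_round(global_weights, clients, lr=0.1):
    # The server only receives updated parameters from each client.
    updates = [local_update(global_weights, X, y, lr) for X, y in clients]
    return np.mean(updates, axis=0)

rng = np.random.default_rng(0)
true_w = np.array([2.0, -1.0])
clients = []
for _ in range(5):
    X = rng.normal(size=(20, 2))
    clients.append((X, X @ true_w + rng.normal(scale=0.1, size=20)))

w = np.zeros(2)
for _ in range(50):
    w = federated_round(w, clients)
print(w)  # approaches [2.0, -1.0] without pooling any client's raw data
&lt;/pre&gt;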
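&lt;p style="text-align: justify; "&gt;Similarly, a minimal sketch of differential privacy using the Laplace mechanism for a simple counting query is shown below; the dataset, query and epsilon value are hypothetical and chosen only to illustrate how noise is added at the point of access.&lt;/p&gt;
&lt;pre&gt;
# Minimal sketch of the Laplace mechanism for a counting query
# (illustrative data and privacy budget; not a production design).
import numpy as np

def private_count(records, predicate, epsilon=0.5):
    true_count = sum(1 for r in records if predicate(r))
    sensitivity = 1.0  # one person can change a count by at most 1
    noise = np.random.laplace(loc=0.0, scale=sensitivity / epsilon)
    return true_count + noise

ages = [23, 35, 41, 29, 52, 61, 38, 27]
# The released value answers "how many people are over 40?" while
# masking the contribution of any single individual.
print(private_count(ages, lambda age: age > 40))
&lt;/pre&gt;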
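&lt;p style="text-align: justify; "&gt;For homomorphic encryption, the sketch below assumes the open-source python-paillier (phe) library, which implements the partially homomorphic Paillier scheme; the salary figures are invented, and the example only shows that additions can be performed on encrypted values.&lt;/p&gt;
&lt;pre&gt;
# Minimal sketch of computing on encrypted data with the Paillier scheme
# (assumes the open-source `phe` package: pip install phe).
from phe import paillier

public_key, private_key = paillier.generate_paillier_keypair()

# A data fiduciary receives only encrypted values from data principals.
salaries = [52000, 61000, 47000]
encrypted = [public_key.encrypt(s) for s in salaries]

# Sums (and scalar multiplications) can be computed without decryption.
encrypted_total = sum(encrypted[1:], encrypted[0])

# Only the key holder can recover the aggregate result.
print(private_key.decrypt(encrypted_total))  # 160000
&lt;/pre&gt;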
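&lt;p style="text-align: justify; "&gt;Finally, a sketch of generating a local explanation with the open-source lime package is shown below; the classifier and dataset (scikit-learn's iris data) are stand-ins chosen for brevity and are not drawn from the work cited above.&lt;/p&gt;
&lt;pre&gt;
# Minimal sketch of a LIME explanation for a single prediction
# (assumes the open-source `lime` and `scikit-learn` packages).
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from lime.lime_tabular import LimeTabularExplainer

data = load_iris()
model = RandomForestClassifier(random_state=0).fit(data.data, data.target)

explainer = LimeTabularExplainer(
    data.data,
    feature_names=data.feature_names,
    class_names=list(data.target_names),
    mode="classification",
)

# Explain one prediction: which features pushed the model towards its decision.
explanation = explainer.explain_instance(
    data.data[0], model.predict_proba, num_features=4
)
print(explanation.as_list())  # feature/weight pairs for this single decision
&lt;/pre&gt;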
&lt;h3 style="text-align: justify; "&gt;Public Sector use of AI and Privacy&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;The role of AI in public sector decision making has been gradually growing globally, across sectors such as law enforcement, education, transportation, judicial decision making and healthcare. In India too, the use of automated processing - in electronic governance under the Digital India mission, in domestic law enforcement agencies monitoring social media content, and in educational schemes - is being discussed and gradually implemented. Much like the potential applications of AI across sub-sectors, the nature of the regulatory issues is also diverse.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Aside from the accountability framework discussed in the Srikrishna Committee report, the Puttaswamy judgment also provides a basis for the governance of AI with respect to its privacy concerns, in limited contexts. The sources of the right to privacy as articulated in the Puttaswamy judgments included ‘personal liberty’ under Article 21 of the Constitution. In order to fully appreciate how constitutional principles could apply to automated processing in India, we need to look closely at the origins of privacy under liberty. In the famous case of &lt;i&gt;AK Gopalan&lt;/i&gt; there is a protracted discussion on the contents of the rights under Article 21. The majority opinions themselves were divided: while Sastri J. and Mukherjea J. took the restrictive view limiting the protections to bodily restraint and detention, Kania J. and Das J. took a broader view that included the right to sleep, play and so on. Through &lt;i&gt;RC Cooper&lt;/i&gt;&lt;a href="#_ftn35" name="_ftnref35"&gt;&lt;sup&gt;&lt;sup&gt;[35]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; and &lt;i&gt;Maneka&lt;/i&gt;&lt;a href="#_ftn36" name="_ftnref36"&gt;&lt;sup&gt;&lt;sup&gt;[36]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;, the Supreme Court took steps to reverse the majority opinion in &lt;i&gt;Gopalan&lt;/i&gt;, and it was established that the freedoms and rights in Part III could be addressed by more than one provision. The expansion of ‘personal liberty’ began in &lt;i&gt;Kharak Singh&lt;/i&gt;, where unjustified interference with a person’s right to live in his house was held to be violative of Article 21. The reasoning in &lt;i&gt;Kharak Singh&lt;/i&gt; draws heavily from &lt;i&gt;Munn&lt;/i&gt; v. &lt;i&gt;Illinois&lt;/i&gt;,&lt;a href="#_ftn37" name="_ftnref37"&gt;&lt;sup&gt;&lt;sup&gt;[37]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; which held life to be “more than mere animal existence.” Curiously, after taking this position, &lt;i&gt;Kharak Singh&lt;/i&gt; fails to recognise a fundamental right to privacy (analogous to the Fourth Amendment protection in the US) under Article 21. The position taken in &lt;i&gt;Kharak Singh&lt;/i&gt; was to extrapolate the same method of wide interpretation to ‘personal liberty’ as was accorded to ‘life’. &lt;i&gt;Maneka&lt;/i&gt;, which evolved the test for unenumerated rights within Part III, says that the claimed right must be an integral part of, or of the same nature as, the named right: the claimed right must be ‘in reality and substance nothing but an instance of the exercise of the named fundamental right’. The clear reading of privacy into ‘personal liberty’ in the Puttaswamy judgment is effectively a correction of the inherent inconsistencies in the positions taken by the majority in &lt;i&gt;Kharak Singh&lt;/i&gt;.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;The other significant change in constitutional interpretation that occurred in &lt;i&gt;Maneka&lt;/i&gt; was with respect to the phrase ‘procedure established by law’ in Article 21. In &lt;i&gt;Gopalan&lt;/i&gt;, the majority held that the phrase ‘procedure established by law’ does not mean procedural due process or natural justice. What this meant was that, once a ‘procedure’ was ‘established by law’, Article 21 could not be said to have been infringed. This position was entirely reversed in &lt;i&gt;Maneka&lt;/i&gt;. The ratio in &lt;i&gt;Maneka&lt;/i&gt; said that ‘procedure established by law’ must be fair, just and reasonable, and cannot be arbitrary and fanciful. Therefore, any infringement of the right to privacy must be through a law which follows the principles of natural justice and is not arbitrary or unfair. It follows that any instance of automated processing for public functioning, whether by state actors or others, must meet this standard of ‘fair, just and reasonable’.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;While there is a lot of focus internationally on what ethical AI must be, it is important that, when we consider the use of AI by the state, we pay heed to the existing constitutional principles which determine the standards against which AI must be evaluated. These principles, however, extend only to limited circumstances, for the protections under Article 21 are not horizontal in nature but applicable only against the state. Whether a party is the state or not is a question that has been considered several times by the Supreme Court and must be determined by functional tests. In our submission to the Justice Srikrishna Committee, we clearly recommended that where automated decision making is used for the discharge of public functions, the data protection law must state that such actions are subject to the constitutional standards, are ‘just, fair and reasonable’, and satisfy the tests for both procedural and substantive due process. To a limited extent, the committee seems to have picked up the standards of ‘fair’ and ‘reasonable’ and made them applicable to all forms of processing, whether public or private. It is as yet unclear whether fairness and reasonableness as inserted in the Bill would draw from the constitutional standard under Article 21. The report makes a reference to the twin principles of acting in a manner that upholds the best interest of the privacy of the individual, and processing within the reasonable expectations of the individual, which do not seem to cover the fullest essence of the legal standard under Article 21.&lt;/p&gt;
&lt;h3 style="text-align: justify; "&gt;Conclusion&lt;/h3&gt;
&lt;p style="text-align: justify; "&gt;The Srikrishna Committee Bill attempts to create an accountability framework for the use of emerging technologies including AI that is focused on placing the responsibility on companies to prevent harm. Though not as robust as found in the GDPR, the protections have been enabled through requirements such as fair and reasonable processing, ensuring data quality, and implementing principles of privacy of design. At the sametime, the Srikrishna Bill does not include provisions that can begin to address the  consumer facing ‘black box’ of AI by ensuring that individuals have information about the potential impact of decisions taken by automated means. In contrast, the GDPR has already taken important steps to tackle this by requiring companies to explain the logic and potential impact of decisions taken by automated means.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;Most importantly, the Bill gives the Data Protection Authority the necessary tools to hold companies accountable for the use of AI through the requirements of data protection audits. If enacted, it will have to be seen how these audits and the principle of privacy by design are implemented and enforced in the context of companies using  AI. Though the Bill creates a Data Protection Authority consisting of members that have significant experience in data protection, information technology, data management, data science, cyber and internet laws, and related subjects, these requirements can be further strengthened by having someone from a background of ethics and human rights.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;One of the responsibilities of the DPA under the Srikrishna Bill will be to monitor technological developments and commercial practices that may affect protection of personal data and promote measures and undertake research for innovation in the field of protection of personal data. If enacted, we hope that AI and solutions towards enhancing privacy in the context of AI like described above will be one of these focus areas of the DPA. It will also be important to see how the DPA develops impact assessments related to AI and what tools associated with the principle of Privacy by Design emerge to address AI.&lt;/p&gt;
&lt;hr style="text-align: justify; " /&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref1" name="_ftn1"&gt;&lt;sup&gt;&lt;sup&gt;[1]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://privacyinternational.org/topics/artificial-intelligence&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref2" name="_ftn2"&gt;&lt;sup&gt;&lt;sup&gt;[2]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.wired.com/story/our-machines-now-have-knowledge-well-never-understand/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref3" name="_ftn3"&gt;&lt;sup&gt;&lt;sup&gt;[3]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://iapp.org/news/a/ai-offers-opportunity-to-increase-privacy-for-users/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref4" name="_ftn4"&gt;&lt;sup&gt;&lt;sup&gt;[4]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://iapp.org/media/pdf/resource_center/GDPR_Study_Maldoff.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref5" name="_ftn5"&gt;&lt;sup&gt;&lt;sup&gt;[5]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://gdpr-info.eu/art-22-gdpr/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref6" name="_ftn6"&gt;&lt;sup&gt;&lt;sup&gt;[6]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://gdpr-info.eu/art-14-gdpr/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref7" name="_ftn7"&gt;&lt;sup&gt;&lt;sup&gt;[7]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.datatilsynet.no/globalassets/global/english/ai-and-privacy.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref8" name="_ftn8"&gt;&lt;sup&gt;&lt;sup&gt;[8]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.datatilsynet.no/globalassets/global/english/ai-and-privacy.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref9" name="_ftn9"&gt;&lt;sup&gt;&lt;sup&gt;[9]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://gdpr-info.eu/art-25-gdpr/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref10" name="_ftn10"&gt;&lt;sup&gt;&lt;sup&gt;[10]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://ico.org.uk/for-organisations/guide-to-the-general-data-protection-regulation-gdpr/accountability-and-governance/data-protection-impact-assessments/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref11" name="_ftn11"&gt;&lt;sup&gt;&lt;sup&gt;[11]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://gdpr-info.eu/art-21-gdpr/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref12" name="_ftn12"&gt;&lt;sup&gt;&lt;sup&gt;[12]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://gdpr-info.eu/art-22-gdpr/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref13" name="_ftn13"&gt;&lt;sup&gt;&lt;sup&gt;[13]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://gdpr-info.eu/art-14-gdpr/&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref14" name="_ftn14"&gt;&lt;sup&gt;&lt;sup&gt;[14]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt;Draft Data Protection Bill 2018 -  Chapter II section 9&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref15" name="_ftn15"&gt;&lt;sup&gt;&lt;sup&gt;[15]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Draft Data Protection Bill 2018 -  Chapter VII section 29&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref16" name="_ftn16"&gt;&lt;sup&gt;&lt;sup&gt;[16]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Draft Data Protection Bill 2018 -  Chapter VII section 33&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref17" name="_ftn17"&gt;&lt;sup&gt;&lt;sup&gt;[17]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Draft Data Protection Bill 2018 -  Chapter VII section 38&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref18" name="_ftn18"&gt;&lt;sup&gt;&lt;sup&gt;[18]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Draft Data Protection Bill 2018 -  Chapter VIII section 40&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref19" name="_ftn19"&gt;&lt;sup&gt;&lt;sup&gt;[19]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Draft Data Protection Bill 2018 -  Chapter X section 60&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref20" name="_ftn20"&gt;&lt;sup&gt;&lt;sup&gt;[20]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Draft Data Protection Bill 2018 -  Chapter II section 4&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref21" name="_ftn21"&gt;&lt;sup&gt;&lt;sup&gt;[21]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Draft Data Protection Bill 2018 - Chapter II section 5&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref22" name="_ftn22"&gt;&lt;sup&gt;&lt;sup&gt;[22]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Draft Data Protection Bill 2018 -  Chapter IX Section 45&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref23" name="_ftn23"&gt;&lt;sup&gt;&lt;sup&gt;[23]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Draft Data Protection Bill 2018 - Chapter XIV section 97&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref24" name="_ftn24"&gt;&lt;sup&gt;&lt;sup&gt;[24]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Draft Data Protection Bill 2018 - Chapter VII section 31&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref25" name="_ftn25"&gt;&lt;sup&gt;&lt;sup&gt;[25]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; Srikrishna Committee Report on Data Protection pg. 36 and 37. Available at: http://www.prsindia.org/uploads/media/Data%20Protection/Committee%20Report%20on%20Draft%20Personal%20Data%20Protection%20Bill,%202018.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref26" name="_ftn26"&gt;&lt;sup&gt;&lt;sup&gt;[26]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.ciosummits.com/Online_Assets_DocAuthority_Whitepaper_-_Guide_to_Intelligent_GDPR_Compliance.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref27" name="_ftn27"&gt;&lt;sup&gt;&lt;sup&gt;[27]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://jolt.law.harvard.edu/assets/articlePDFs/v31/31HarvJLTech217.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref28" name="_ftn28"&gt;&lt;sup&gt;&lt;sup&gt;[28]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://standards.ieee.org/content/dam/ieee-standards/standards/web/documents/other/ead_personal_data_v2.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref29" name="_ftn29"&gt;&lt;sup&gt;&lt;sup&gt;[29]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.datatilsynet.no/globalassets/global/english/ai-and-privacy.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref30" name="_ftn30"&gt;&lt;sup&gt;&lt;sup&gt;[30]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.artificial-intelligence.blog/news/capsule-networks&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref31" name="_ftn31"&gt;&lt;sup&gt;&lt;sup&gt;[31]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; http://raird.no/about/factsheet.html&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref32" name="_ftn32"&gt;&lt;sup&gt;&lt;sup&gt;[32]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.darpa.mil/attachments/XAIProgramUpdate.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref33" name="_ftn33"&gt;&lt;sup&gt;&lt;sup&gt;[33]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.darpa.mil/attachments/XAIProgramUpdate.pdf&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref34" name="_ftn34"&gt;&lt;sup&gt;&lt;sup&gt;[34]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; https://www.oreilly.com/learning/introduction-to-local-interpretable-model-agnostic-explanations-lime&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref35" name="_ftn35"&gt;&lt;sup&gt;&lt;sup&gt;[35]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;i&gt;R C Cooper&lt;/i&gt; v. &lt;i&gt;Union of India&lt;/i&gt;, 1970 SCR (3) 530.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref36" name="_ftn36"&gt;&lt;sup&gt;&lt;sup&gt;[36]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; &lt;i&gt;Maneka Gandhi&lt;/i&gt; v. &lt;i&gt;Union of India&lt;/i&gt;, 1978 SCR (2) 621.&lt;/p&gt;
&lt;p style="text-align: justify; "&gt;&lt;a href="#_ftnref37" name="_ftn37"&gt;&lt;sup&gt;&lt;sup&gt;[37]&lt;/sup&gt;&lt;/sup&gt;&lt;/a&gt; 94 US 113 (1877).&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/blog/the-srikrishna-committee-data-protection-bill-and-artificial-intelligence-in-india'&gt;https://cis-india.org/internet-governance/blog/the-srikrishna-committee-data-protection-bill-and-artificial-intelligence-in-india&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Amber Sinha and Elonnai Hickok</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Artificial Intelligence</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-09-03T13:29:12Z</dc:date>
   <dc:type>Blog Entry</dc:type>
   </item>


    <item rdf:about="https://cis-india.org/internet-governance/news/celebrating-one-year-of-the-justice-k-s-puttaswamy-v-union-of-india-judgment">
    <title>Celebrating One Year of the Justice K.S. Puttaswamy v. Union of India Judgment</title>
    <link>https://cis-india.org/internet-governance/news/celebrating-one-year-of-the-justice-k-s-puttaswamy-v-union-of-india-judgment</link>
    <description>
        &lt;b&gt;Shweta Mohandas was a panelist at the event, "Celebrating One Year of the Justice K.S. Puttaswamy v. Union of India Judgment", organised by Indian Council for Research on International Economic Relations, and the Centre for Communication Governance at National Law University Delhi. It took place on Friday, 24 August 2018 at India International Centre, New Delhi.&lt;/b&gt;
        &lt;p style="text-align: justify; "&gt;The event began with Dr. Usha Ramanathan's Opening remarks on the State of Privacy in India &amp;amp; the Challenges to Realising Puttaswamy’s Promise. This was then followed by two panel discussions, the first on Data Protection for a Free and Fair Digital Economy and the second on the Legacy of the Justice K.S. Puttaswamy v. Union of India Judgment. Shweta participated in the second panel.  More details of the event &lt;a class="external-link" href="https://ccgnludelhi.wordpress.com/2018/08/22/celebrating-one-year-of-the-puttaswamy-judgment-august-24-6-00-pm-iic/"&gt;here&lt;/a&gt;.&lt;/p&gt;
        &lt;p&gt;
        For more details visit &lt;a href='https://cis-india.org/internet-governance/news/celebrating-one-year-of-the-justice-k-s-puttaswamy-v-union-of-india-judgment'&gt;https://cis-india.org/internet-governance/news/celebrating-one-year-of-the-justice-k-s-puttaswamy-v-union-of-india-judgment&lt;/a&gt;
        &lt;/p&gt;
    </description>
    <dc:publisher>No publisher</dc:publisher>
    <dc:creator>Admin</dc:creator>
    <dc:rights></dc:rights>

    
        <dc:subject>Internet Governance</dc:subject>
    
    
        <dc:subject>Privacy</dc:subject>
    

   <dc:date>2018-08-30T02:53:48Z</dc:date>
   <dc:type>News Item</dc:type>
   </item>




</rdf:RDF>
